Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestAdmin1, method verifyRoundRobinDistribution.
protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int expectedRegions) throws IOException {
  int numRS = c.getCurrentNrHRS();
  List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
  Map<ServerName, List<HRegionInfo>> server2Regions = new HashMap<>();
  for (HRegionLocation loc : regions) {
    ServerName server = loc.getServerName();
    List<HRegionInfo> regs = server2Regions.get(server);
    if (regs == null) {
      regs = new ArrayList<>();
      server2Regions.put(server, regs);
    }
    regs.add(loc.getRegionInfo());
  }
  if (numRS >= 2) {
    // Ignore the master region server,
    // which intentionally hosts fewer regions.
    numRS--;
  }
  float average = (float) expectedRegions / numRS;
  int min = (int) Math.floor(average);
  int max = (int) Math.ceil(average);
  for (List<HRegionInfo> regionList : server2Regions.values()) {
    assertTrue(regionList.size() == min || regionList.size() == max);
  }
}
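The hand-rolled map population above is a common pattern for bucketing HRegionLocation entries by hosting server. As a side note (not part of the test), the same grouping can be sketched with Java 8 streams; the helper name groupByServer is only illustrative, and java.util.stream.Collectors is assumed to be imported:

  // Sketch only: group each region's HRegionInfo under the ServerName hosting it.
  static Map<ServerName, List<HRegionInfo>> groupByServer(RegionLocator locator) throws IOException {
    return locator.getAllRegionLocations().stream()
        .collect(Collectors.groupingBy(
            HRegionLocation::getServerName,
            Collectors.mapping(HRegionLocation::getRegionInfo, Collectors.toList())));
  }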
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestAdmin1, method splitTest.
void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize) throws Exception {
  TableName tableName = TableName.valueOf("testForceSplit");
  StringBuilder sb = new StringBuilder();
  // Add a tail to the String so we can see better in logs where a test is running.
  for (int i = 0; i < rowCounts.length; i++) {
    sb.append("_").append(Integer.toString(rowCounts[i]));
  }
  assertFalse(admin.tableExists(tableName));
  try (final Table table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);
      final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    int rowCount = 0;
    byte[] q = new byte[0];
    // Insert rows into column families. The number of rows that will be inserted
    // in a specific column family is decided by rowCounts[familyIndex].
    for (int index = 0; index < familyNames.length; index++) {
      ArrayList<Put> puts = new ArrayList<>(rowCounts[index]);
      for (int i = 0; i < rowCounts[index]; i++) {
        byte[] k = Bytes.toBytes(i);
        Put put = new Put(k);
        put.addColumn(familyNames[index], q, k);
        puts.add(put);
      }
      table.put(puts);
      if (rowCount < rowCounts[index]) {
        rowCount = rowCounts[index];
      }
    }
    // get the initial layout (should just be one region)
    List<HRegionLocation> m = locator.getAllRegionLocations();
    LOG.info("Initial regions (" + m.size() + "): " + m);
    assertTrue(m.size() == 1);
    // Verify row count
    Scan scan = new Scan();
    ResultScanner scanner = table.getScanner(scan);
    int rows = 0;
    for (@SuppressWarnings("unused") Result result : scanner) {
      rows++;
    }
    scanner.close();
    assertEquals(rowCount, rows);
    // Have an outstanding scan going on to make sure we can scan over splits.
    scan = new Scan();
    scanner = table.getScanner(scan);
    // Scan the first row so we are into the first region before the split happens.
    scanner.next();
    // Split the table
    this.admin.split(tableName, splitPoint);
    final AtomicInteger count = new AtomicInteger(0);
    Thread t = new Thread("CheckForSplit") {
      @Override
      public void run() {
        for (int i = 0; i < 45; i++) {
          try {
            sleep(1000);
          } catch (InterruptedException e) {
            continue;
          }
          // check again
          List<HRegionLocation> regions = null;
          try {
            regions = locator.getAllRegionLocations();
          } catch (IOException e) {
            e.printStackTrace();
          }
          if (regions == null) {
            continue;
          }
          count.set(regions.size());
          if (count.get() >= 2) {
            LOG.info("Found: " + regions);
            break;
          }
          LOG.debug("Cycle waiting on split");
        }
        LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
      }
    };
    t.setPriority(Thread.NORM_PRIORITY - 2);
    t.start();
    t.join();
    // Verify row count
    // We counted one row above.
    rows = 1;
    for (@SuppressWarnings("unused") Result result : scanner) {
      rows++;
      if (rows > rowCount) {
        scanner.close();
        assertTrue("Scanned more than expected (" + rowCount + ")", false);
      }
    }
    scanner.close();
    assertEquals(rowCount, rows);
    List<HRegionLocation> regions = null;
    try {
      regions = locator.getAllRegionLocations();
    } catch (IOException e) {
      e.printStackTrace();
    }
    assertEquals(2, regions.size());
    if (splitPoint != null) {
      // make sure the split point matches our explicit configuration
      assertEquals(Bytes.toString(splitPoint), Bytes.toString(regions.get(0).getRegionInfo().getEndKey()));
      assertEquals(Bytes.toString(splitPoint), Bytes.toString(regions.get(1).getRegionInfo().getStartKey()));
      LOG.debug("Properly split on " + Bytes.toString(splitPoint));
    } else {
      if (familyNames.length > 1) {
        int splitKey = Bytes.toInt(regions.get(0).getRegionInfo().getEndKey());
        // check if splitKey is based on the largest column family
        // in terms of its store size
        int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
        LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + ", r=" + regions.get(0).getRegionInfo());
        for (int index = 0; index < familyNames.length; index++) {
          int delta = Math.abs(rowCounts[index] / 2 - splitKey);
          if (delta < deltaForLargestFamily) {
            assertTrue("Delta " + delta + " for family " + index + " should be at least " + "deltaForLargestFamily " + deltaForLargestFamily, false);
          }
        }
      }
    }
    TEST_UTIL.deleteTable(tableName);
  }
}
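splitTest drives the split through Admin.split and then polls getAllRegionLocations until two regions show up, comparing the new boundaries via getStartKey/getEndKey. The region serving a single row can also be inspected directly through RegionLocator.getRegionLocation; a minimal sketch, where the helper name printRegionForRow and the conn/tableName/row parameters are assumptions for illustration:

  // Sketch: find the region currently serving one row and print its key range.
  static void printRegionForRow(Connection conn, TableName tableName, byte[] row) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(tableName)) {
      HRegionLocation loc = locator.getRegionLocation(row);
      HRegionInfo info = loc.getRegionInfo();
      System.out.println("Row served by " + loc.getServerName()
          + " in range [" + Bytes.toStringBinary(info.getStartKey())
          + ", " + Bytes.toStringBinary(info.getEndKey()) + ")");
    }
  }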
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestAdmin1, method testEnableTableRetainAssignment.
/**
* Test retain assignment on enableTable.
*
* @throws IOException
*/
@Test(timeout = 300000)
public void testEnableTableRetainAssignment() throws IOException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
      new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 },
      new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } };
  int expectedRegions = splitKeys.length + 1;
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc, splitKeys);
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    List<HRegionLocation> regions = l.getAllRegionLocations();
    assertEquals("Tried to create " + expectedRegions + " regions "
        + "but only found " + regions.size(), expectedRegions, regions.size());
    // Disable table.
    admin.disableTable(tableName);
    // Enable table, use retain assignment to assign regions.
    admin.enableTable(tableName);
    List<HRegionLocation> regions2 = l.getAllRegionLocations();
    // Check the assignment.
    assertEquals(regions.size(), regions2.size());
    assertTrue(regions2.containsAll(regions));
  }
}
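The retained-assignment check above leans on HRegionLocation equality, which covers both the region and the server hosting it. An equivalent, more explicit comparison could look like the sketch below; the helper name sameAssignment is hypothetical, and java.util.HashMap/List imports are assumed:

  // Sketch only: true when every region in `after` sits on the same server it had in `before`.
  static boolean sameAssignment(List<HRegionLocation> before, List<HRegionLocation> after) {
    Map<String, ServerName> byRegion = new HashMap<>();
    for (HRegionLocation loc : before) {
      byRegion.put(loc.getRegionInfo().getEncodedName(), loc.getServerName());
    }
    for (HRegionLocation loc : after) {
      ServerName previous = byRegion.get(loc.getRegionInfo().getEncodedName());
      if (previous == null || !previous.equals(loc.getServerName())) {
        return false;
      }
    }
    return true;
  }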
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestAdmin1, method testCreateTableNumberOfRegions.
@Test(timeout = 300000)
public void testCreateTableNumberOfRegions() throws IOException, InterruptedException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc);
  List<HRegionLocation> regions;
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    regions = l.getAllRegionLocations();
    assertEquals("Table should have only 1 region", 1, regions.size());
  }
  TableName TABLE_2 = TableName.valueOf(tableName.getNameAsString() + "_2");
  desc = new HTableDescriptor(TABLE_2);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc, new byte[][] { new byte[] { 42 } });
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_2)) {
    regions = l.getAllRegionLocations();
    assertEquals("Table should have only 2 regions", 2, regions.size());
  }
  TableName TABLE_3 = TableName.valueOf(tableName.getNameAsString() + "_3");
  desc = new HTableDescriptor(TABLE_3);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc, "a".getBytes(), "z".getBytes(), 3);
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_3)) {
    regions = l.getAllRegionLocations();
    assertEquals("Table should have only 3 regions", 3, regions.size());
  }
  TableName TABLE_4 = TableName.valueOf(tableName.getNameAsString() + "_4");
  desc = new HTableDescriptor(TABLE_4);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  try {
    admin.createTable(desc, "a".getBytes(), "z".getBytes(), 2);
    fail("Should not be able to create a table with only 2 regions using this API.");
  } catch (IllegalArgumentException eae) {
    // Expected
  }
  TableName TABLE_5 = TableName.valueOf(tableName.getNameAsString() + "_5");
  desc = new HTableDescriptor(TABLE_5);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc, new byte[] { 1 }, new byte[] { 127 }, 16);
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_5)) {
    regions = l.getAllRegionLocations();
    assertEquals("Table should have 16 regions", 16, regions.size());
  }
}
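The four-argument createTable(desc, startKey, endKey, numRegions) overload computes the intermediate split keys itself, which is why the test above expects an IllegalArgumentException when asking for only two regions through it. To see which boundaries a pre-split table actually ended up with, the location list can simply be walked; a rough sketch, where the helper name dumpRegionBoundaries and the conn parameter are assumptions:

  // Sketch: dump each region's key range and hosting server for a pre-split table.
  static void dumpRegionBoundaries(Connection conn, TableName tableName) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(tableName)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        HRegionInfo info = loc.getRegionInfo();
        System.out.println(info.getRegionNameAsString()
            + " [" + Bytes.toStringBinary(info.getStartKey())
            + ", " + Bytes.toStringBinary(info.getEndKey())
            + ") on " + loc.getServerName());
      }
    }
  }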
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestZKAsyncRegistry, method test.
@Test
public void test() throws InterruptedException, ExecutionException, IOException {
  assertEquals(TEST_UTIL.getHBaseCluster().getClusterStatus().getClusterId(),
      REGISTRY.getClusterId().get());
  assertEquals(TEST_UTIL.getHBaseCluster().getClusterStatus().getServersSize(),
      REGISTRY.getCurrentNrHRS().get().intValue());
  assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(),
      REGISTRY.getMasterAddress().get());
  assertEquals(-1, REGISTRY.getMasterInfoPort().get().intValue());
  waitUntilAllReplicasHavingRegionLocation(TableName.META_TABLE_NAME);
  RegionLocations locs = REGISTRY.getMetaRegionLocation().get();
  assertEquals(3, locs.getRegionLocations().length);
  IntStream.range(0, 3).forEach(i -> {
    HRegionLocation loc = locs.getRegionLocation(i);
    assertNotNull("Replica " + i + " doesn't have location", loc);
    assertTrue(loc.getRegionInfo().getTable().equals(TableName.META_TABLE_NAME));
    assertEquals(i, loc.getRegionInfo().getReplicaId());
  });
}
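RegionLocations here bundles one HRegionLocation per meta replica, indexed by replica id; a slot can be null when a replica has no location yet, which is why the test waits for all replicas first. A defensive iteration over the same locs object might be sketched as:

  // Sketch: walk every replica slot in a RegionLocations, skipping undeployed ones.
  for (HRegionLocation loc : locs.getRegionLocations()) {
    if (loc == null) {
      continue; // replica not assigned yet
    }
    System.out.println("replica " + loc.getRegionInfo().getReplicaId()
        + " of " + loc.getRegionInfo().getTable() + " on " + loc.getServerName());
  }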