Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
In the class TestRegionReplicas, method before().
@BeforeClass
public static void before() throws Exception {
  // Reduce the hdfs block size and prefetch to trigger the file-link reopen
  // when the file is moved to archive (e.g. compaction)
  HTU.getConfiguration().setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 8192);
  HTU.getConfiguration().setInt(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 1);
  HTU.getConfiguration().setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 128 * 1024 * 1024);
  HTU.startMiniCluster(NB_SERVERS);

  final TableName tableName = TableName.valueOf(TestRegionReplicas.class.getSimpleName());

  // Create table then get the single region for our new table.
  table = HTU.createTable(tableName, f);
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
    hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
  }

  // mock a secondary region info to open
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);

  // No master
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
}
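The pattern worth noting here is the short-lived, try-with-resources RegionLocator obtained from the shared Connection, used only to look up the region serving a single row. A minimal standalone sketch of the same lookup follows; the table name, row key, and configuration are illustrative assumptions, not taken from the test above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.valueOf("exampleTable"))) {
      // The second argument ('reload') is false, so a cached location may be returned
      // instead of forcing a fresh hbase:meta lookup.
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("exampleRow"), false);
      System.out.println("Row is served by region "
          + location.getRegionInfo().getRegionNameAsString()
          + " on " + location.getServerName());
    }
  }
}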
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
In the class TestHBaseFsckOneRS, method testSplitDaughtersNotInMeta().
/**
* Split crashed after write to hbase:meta finished for the parent region, but
* failed to write daughters (pre HBASE-7721 codebase)
*/
@Test(timeout = 75000)
public void testSplitDaughtersNotInMeta() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
  try {
    setupTable(tableName);
    assertEquals(ROWKEYS.length, countRows());

    // Make sure the data is in the regions; if it is only in the WAL there is no data loss.
    admin.flush(tableName);

    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
      HRegionInfo hri = location.getRegionInfo();

      // Disable CatalogJanitor to prevent it from cleaning up the parent region after split.
      admin.enableCatalogJanitor(false);

      // do a regular split
      byte[] regionName = location.getRegionInfo().getRegionName();
      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
      TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);

      PairOfSameType<HRegionInfo> daughters =
          MetaTableAccessor.getDaughterRegions(meta.get(new Get(regionName)));

      // Delete the daughter regions from meta (but not from hdfs) and unassign them.
      ServerName firstSN = rl.getRegionLocation(daughters.getFirst().getStartKey()).getServerName();
      ServerName secondSN = rl.getRegionLocation(daughters.getSecond().getStartKey()).getServerName();
      undeployRegion(connection, firstSN, daughters.getFirst());
      undeployRegion(connection, secondSN, daughters.getSecond());

      List<Delete> deletes = new ArrayList<>(2);
      deletes.add(new Delete(daughters.getFirst().getRegionName()));
      deletes.add(new Delete(daughters.getSecond().getRegionName()));
      meta.delete(deletes);

      // Remove daughters from regionStates
      RegionStates regionStates =
          TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      regionStates.deleteRegion(daughters.getFirst());
      regionStates.deleteRegion(daughters.getSecond());

      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          // no LINGERING_SPLIT_PARENT
          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });

      // Now fix it. The fix should not revert the region split, but add daughters to META.
      hbck = doFsck(conf, true, true, false, false, false, false, false, false, false, false, false, false, null);
      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });

      // assert that the split hbase:meta entry is still there.
      Get get = new Get(hri.getRegionName());
      Result result = meta.get(get);
      assertNotNull(result);
      assertNotNull(MetaTableAccessor.getHRegionInfo(result));
      assertEquals(ROWKEYS.length, countRows());

      // assert that we still have the split regions
      assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1); // SPLITS + 1 is # regions pre-split.
      assertNoErrors(doFsck(conf, false)); // should be fixed by now
    }
  } finally {
    admin.enableCatalogJanitor(true);
    meta.close();
    cleanupTable(tableName);
  }
}
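Since the test above deliberately drives hbck toward a HOLE_IN_REGION_CHAIN report, a small illustrative fragment of how such a hole can be spotted from the client side with RegionLocator may help. This is a hedged sketch, not part of the test: it assumes an open Connection named connection, a hypothetical table name, an import of org.apache.hadoop.hbase.util.Pair, and that getStartEndKeys() returns the regions in start-key order.

try (RegionLocator rl = connection.getRegionLocator(TableName.valueOf("exampleTable"))) {
  // Parallel arrays: startKeys[i] / endKeys[i] describe the i-th region of the table.
  Pair<byte[][], byte[][]> keys = rl.getStartEndKeys();
  byte[][] startKeys = keys.getFirst();
  byte[][] endKeys = keys.getSecond();
  for (int i = 0; i + 1 < startKeys.length; i++) {
    // In a contiguous region chain, region i's end key equals region i+1's start key.
    if (!Bytes.equals(endKeys[i], startKeys[i + 1])) {
      System.out.println("Hole in region chain between " + Bytes.toStringBinary(endKeys[i])
          + " and " + Bytes.toStringBinary(startKeys[i + 1]));
    }
  }
}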
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
In the class TestHBaseFsckOneRS, method testValidLingeringSplitParent().
/**
* Tests that LINGERING_SPLIT_PARENT is not erroneously reported for
* valid cases where the daughters are there.
*/
@Test(timeout = 180000)
public void testValidLingeringSplitParent() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table meta = null;
  try {
    setupTable(tableName);
    assertEquals(ROWKEYS.length, countRows());

    // Make sure the data is in the regions; if it is only in the WAL there is no data loss.
    admin.flush(tableName);

    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
      meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
      HRegionInfo hri = location.getRegionInfo();

      // do a regular split
      byte[] regionName = location.getRegionInfo().getRegionName();
      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
      TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);

      // TODO: fixHdfsHoles does not work against splits, since the parent dir lingers on
      // for some time until children references are deleted. HBCK erroneously sees this as
      // overlapping regions
      HBaseFsck hbck = doFsck(conf, true, true, false, false, false, true, true, true, true, false, false, false, null);

      // no LINGERING_SPLIT_PARENT reported
      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {});

      // assert that the split hbase:meta entry is still there.
      Get get = new Get(hri.getRegionName());
      Result result = meta.get(get);
      assertNotNull(result);
      assertNotNull(MetaTableAccessor.getHRegionInfo(result));
      assertEquals(ROWKEYS.length, countRows());

      // assert that we still have the split regions
      assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1); // SPLITS + 1 is # regions pre-split.
      assertNoErrors(doFsck(conf, false));
    }
  } finally {
    cleanupTable(tableName);
    IOUtils.closeQuietly(meta);
  }
}
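For context on why no LINGERING_SPLIT_PARENT shows up here: after a successful split the parent row stays in hbase:meta, marked as split and offline, until CatalogJanitor collects it, and hbck flags LINGERING_SPLIT_PARENT only when that parent entry is left in a problematic state, not for a healthy split like this one. A hedged fragment, reusing the meta table and hri variables from the test above, that would make the parent's state visible:

// Read the parent's hbase:meta row back and inspect its split/offline markers.
Result parentRow = meta.get(new Get(hri.getRegionName()));
HRegionInfo parent = MetaTableAccessor.getHRegionInfo(parentRow);
// Expected state for a valid split parent: both flags set, daughters still present and deployed.
System.out.println("parent split=" + parent.isSplit() + ", offline=" + parent.isOffline());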
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
In the class TestRegionSizeCalculator, method mockRegionLocator().
/**
 * Makes a mock RegionLocator for a table with the given region names.
 */
private RegionLocator mockRegionLocator(String... regionNames) throws IOException {
  RegionLocator mockedTable = Mockito.mock(RegionLocator.class);
  when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable"));

  List<HRegionLocation> regionLocations = new ArrayList<>(regionNames.length);
  when(mockedTable.getAllRegionLocations()).thenReturn(regionLocations);

  for (String regionName : regionNames) {
    HRegionInfo info = Mockito.mock(HRegionInfo.class);
    when(info.getRegionName()).thenReturn(regionName.getBytes());
    regionLocations.add(new HRegionLocation(info, sn));
  }
  return mockedTable;
}
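A brief usage note, not taken from the test class: code that receives this mock sees a table named sizeTestTable whose getAllRegionLocations() returns one HRegionLocation per supplied name, which is all a size-calculation test typically needs. A hypothetical caller, inside a test method that can throw IOException, might exercise it like this:

// Hypothetical usage of the mock above; "region1" and "region2" are arbitrary names.
RegionLocator locator = mockRegionLocator("region1", "region2");
assertEquals(TableName.valueOf("sizeTestTable"), locator.getName());
for (HRegionLocation loc : locator.getAllRegionLocations()) {
  // Only getRegionName() is stubbed on the mocked HRegionInfo, so stick to that accessor.
  System.out.println(Bytes.toString(loc.getRegionInfo().getRegionName()));
}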
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.
In the class TestRegionSplitter, method verifyBounds().
private void verifyBounds(List<byte[]> expectedBounds, TableName tableName) throws Exception {
  // Get region boundaries from the cluster and verify their endpoints
  final int numRegions = expectedBounds.size() - 1;
  try (Table table = UTIL.getConnection().getTable(tableName);
       RegionLocator locator = UTIL.getConnection().getRegionLocator(tableName)) {
    final List<HRegionLocation> regionInfoMap = locator.getAllRegionLocations();
    assertEquals(numRegions, regionInfoMap.size());

    for (HRegionLocation entry : regionInfoMap) {
      final HRegionInfo regionInfo = entry.getRegionInfo();
      byte[] regionStart = regionInfo.getStartKey();
      byte[] regionEnd = regionInfo.getEndKey();

      // This region's start key should be one of the region boundaries
      int startBoundaryIndex = indexOfBytes(expectedBounds, regionStart);
      assertNotSame(-1, startBoundaryIndex);

      // This region's end key should be the region boundary that comes
      // after the starting boundary.
      byte[] expectedRegionEnd = expectedBounds.get(startBoundaryIndex + 1);
      assertEquals(0, Bytes.compareTo(regionEnd, expectedRegionEnd));
    }
  }
}
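verifyBounds leans on a helper, indexOfBytes, that is not shown in this excerpt. A plausible sketch follows, under the assumption that it simply does a linear scan comparing byte arrays; the actual helper in TestRegionSplitter may be implemented differently.

// Hypothetical reconstruction of the helper used above: returns the index of 'key'
// in 'list', or -1 if no element is byte-for-byte equal to it.
private static int indexOfBytes(List<byte[]> list, byte[] key) {
  for (int i = 0; i < list.size(); i++) {
    if (Bytes.compareTo(list.get(i), key) == 0) {
      return i;
    }
  }
  return -1;
}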