Example usage of org.apache.hadoop.hbase.client.DoNotRetryRegionException in the Apache HBase project.
From the class TestNamespaceAuditor, method testRegionMerge.
/**
 * Verifies that the namespace region-count quota is enforced across merges and splits:
 * a table starts at the quota limit, a merge drops it below the limit, a split brings it
 * back to the limit, and any further merge/split that would violate or exceed the quota
 * state is rejected (merge failed via coprocessor hook, split via
 * {@link DoNotRetryRegionException}).
 */
@Test
public void testRegionMerge() throws Exception {
  String nsp1 = prefix + "_regiontest";
  final int initialRegions = 3;
  // Namespace quota: at most 'initialRegions' regions and 2 tables.
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
    .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions)
    .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
  byte[] columnFamily = Bytes.toBytes("info");
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableTwo)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build();
  ADMIN.createTable(tableDescriptor, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions);
  // FIX: the Connection was previously leaked (only the Table was closed);
  // close both via try-with-resources. Also reuse the columnFamily variable
  // instead of rebuilding the same byte[].
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    Table table = connection.getTable(tableTwo)) {
    UTIL.loadNumericRows(table, columnFamily, 1000, 1999);
  }
  ADMIN.flush(tableTwo);
  List<RegionInfo> hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // Merging below the quota limit must succeed.
  Future<?> f = ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(),
    hris.get(1).getEncodedNameAsBytes(), false);
  f.get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions - 1, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  byte[] splitKey = Bytes.toBytes("3");
  HRegion regionToSplit = UTIL.getMiniHBaseCluster().getRegions(tableTwo).stream()
    .filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get();
  regionToSplit.compact(true);
  // Waiting for compaction to finish
  UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (CompactionState.NONE
          == ADMIN.getCompactionStateForRegion(regionToSplit.getRegionInfo().getRegionName()));
    }
  });
  // Cleaning compacted references for split to proceed
  regionToSplit.getStores().stream().forEach(s -> {
    try {
      s.closeAndArchiveCompactedFiles();
    } catch (IOException e1) {
      // FIX: include the exception as the log cause (it was dropped before)
      // and correct the "whiling" typo in the message.
      LOG.error("Error while cleaning compacted file", e1);
    }
  });
  // the above compact may quit immediately if there is a compaction ongoing, so here we need to
  // wait a while to let the ongoing compaction finish.
  UTIL.waitFor(10000, regionToSplit::isSplittable);
  // Splitting back up to the quota limit must succeed.
  ADMIN.splitRegionAsync(regionToSplit.getRegionInfo().getRegionName(), splitKey)
    .get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // Fail region merge through Coprocessor hook
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
  Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class);
  CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
  masterObserver.failMerge(true);
  f = ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(),
    hris.get(2).getEncodedNameAsBytes(), false);
  try {
    f.get(10, TimeUnit.SECONDS);
    fail("Merge was supposed to fail!");
  } catch (ExecutionException expected) {
    // Expected: the coprocessor hook aborts the merge.
  }
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // At the quota limit a split must be rejected.
  try {
    ADMIN.split(tableTwo, Bytes.toBytes("6"));
    // FIX: bare fail() gave no diagnostic on failure.
    fail("Split was supposed to be rejected by the namespace region quota");
  } catch (DoNotRetryRegionException e) {
    // Expected
  }
  // NOTE(review): fixed sleep gives background procedures time to settle;
  // a condition-based wait would be preferable but this preserves existing timing.
  Thread.sleep(2000);
  assertEquals(initialRegions, ADMIN.getRegions(tableTwo).size());
}
Aggregations