Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestNamespaceAuditor, method testRegionMerge.
@Test
public void testRegionMerge() throws Exception {
  String nsp1 = prefix + "_regiontest";
  final int initialRegions = 3;
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
    .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions)
    .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
  byte[] columnFamily = Bytes.toBytes("info");
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableTwo)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build();
  ADMIN.createTable(tableDescriptor, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions);
  Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
  try (Table table = connection.getTable(tableTwo)) {
    UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
  }
  ADMIN.flush(tableTwo);
  List<RegionInfo> hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  Future<?> f = ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(),
    hris.get(1).getEncodedNameAsBytes(), false);
  f.get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions - 1, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  byte[] splitKey = Bytes.toBytes("3");
  HRegion regionToSplit = UTIL.getMiniHBaseCluster().getRegions(tableTwo).stream()
    .filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get();
  regionToSplit.compact(true);
  // Waiting for compaction to finish
  UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (CompactionState.NONE ==
        ADMIN.getCompactionStateForRegion(regionToSplit.getRegionInfo().getRegionName()));
    }
  });
regionToSplit.getStores().stream().forEach(s -> {
try {
s.closeAndArchiveCompactedFiles();
} catch (IOException e1) {
LOG.error("Error whiling cleaning compacted file");
}
});
  // the above compact may quit immediately if there is a compaction ongoing, so here we need to
  // wait a while to let the ongoing compaction finish.
  UTIL.waitFor(10000, regionToSplit::isSplittable);
  ADMIN.splitRegionAsync(regionToSplit.getRegionInfo().getRegionName(), splitKey)
    .get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // Fail region merge through Coprocessor hook
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
  Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class);
  CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
  masterObserver.failMerge(true);
  f = ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(),
    hris.get(2).getEncodedNameAsBytes(), false);
  try {
    f.get(10, TimeUnit.SECONDS);
    fail("Merge was supposed to fail!");
  } catch (ExecutionException ee) {
    // Expected.
  }
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // verify that we cannot split
  try {
    ADMIN.split(tableTwo, Bytes.toBytes("6"));
    fail();
  } catch (DoNotRetryRegionException e) {
    // Expected
  }
  Thread.sleep(2000);
  assertEquals(initialRegions, ADMIN.getRegions(tableTwo).size());
}
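
The CPMasterObserver fetched from the MasterCoprocessorHost above is a test coprocessor defined elsewhere in TestNamespaceAuditor. A minimal sketch of such a merge-failing observer, assuming the HBase 2.x+ coprocessor API (field and hook names here are illustrative, not the test's exact code; imports from org.apache.hadoop.hbase.coprocessor and java.util omitted, as elsewhere on this page):

public static class CPMasterObserver implements MasterCoprocessor, MasterObserver {
  // Flag flipped by the test through failMerge(true).
  private volatile boolean shouldFailMerge = false;

  public void failMerge(boolean fail) {
    shouldFailMerge = fail;
  }

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preMergeRegionsAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
    RegionInfo[] regionsToMerge) throws IOException {
    // Throwing from the pre-merge hook aborts the merge procedure, which the
    // test observes as an ExecutionException from Future.get().
    if (shouldFailMerge) {
      throw new IOException("fail merge");
    }
  }
}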
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class AbstractTestWALReplay, method testReplayEditsAfterRegionMovedWithMultiCF.
/**
 * Tests WAL replay when a region with multiple column families has been moved between region
 * servers, flushed and major-compacted on the destination, and moved back before the origin
 * server aborts.
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
  final TableName tableName = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
  byte[] family1 = Bytes.toBytes("cf1");
  byte[] family2 = Bytes.toBytes("cf2");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] value = Bytes.toBytes("testV");
  byte[][] families = { family1, family2 };
  TEST_UTIL.createTable(tableName, families);
  Table htable = TEST_UTIL.getConnection().getTable(tableName);
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(family1, qualifier, value);
  htable.put(put);
  ResultScanner resultScanner = htable.getScanner(new Scan());
  int count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(1, count);
  SingleProcessHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
  List<HRegion> regions = hbaseCluster.getRegions(tableName);
  assertEquals(1, regions.size());
  // move region to another regionserver
  Region destRegion = regions.get(0);
  int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
  assertTrue("Please start more than 1 regionserver",
    hbaseCluster.getRegionServerThreads().size() > 1);
  int destServerNum = 0;
  while (destServerNum == originServerNum) {
    destServerNum++;
  }
  HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
  HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
  // move region to destination regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());
  // delete the row
  Delete del = new Delete(Bytes.toBytes("r1"));
  htable.delete(del);
  resultScanner = htable.getScanner(new Scan());
  count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(0, count);
  // flush region and make major compaction
  HRegion region =
    (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
  region.flush(true);
  // wait to complete major compaction
  for (HStore store : region.getStores()) {
    store.triggerMajorCompaction();
  }
  region.compact(true);
  // move region to origin regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
  // abort the origin regionserver
  originServer.abort("testing");
  // See what we get: the delete should have been replayed, so the row is gone.
  Result result = htable.get(new Get(Bytes.toBytes("r1")));
  assertTrue("Row is deleted, but we got " + result, result == null || result.isEmpty());
}
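
The scan-and-count idiom above appears twice in the test; a small helper along these lines (hypothetical, not part of AbstractTestWALReplay) would keep the scanner lifecycle in one place:

// Hypothetical helper: counts the rows a full-table scan returns,
// closing the scanner via try-with-resources.
private static int countScannedRows(Table table) throws IOException {
  try (ResultScanner scanner = table.getScanner(new Scan())) {
    int count = 0;
    while (scanner.next() != null) {
      count++;
    }
    return count;
  }
}

The test utility also exposes a similar row count (util.countRows(table) in the next example), which is usually preferable to hand-rolling the loop.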
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestFSErrorsExposed, method testFullSystemBubblesFSErrors.
/**
 * Cluster test which starts a region server with a region, then removes the data from HDFS
 * underneath it, and ensures that errors are bubbled to the client.
 */
@Test
public void testFullSystemBubblesFSErrors() throws Exception {
  // We won't get an error if the datanode is not there and we use short-circuit reads;
  // it's a known 'feature'.
  Assume.assumeTrue(!util.isReadShortCircuitOn());
  try {
    // Make it fail faster.
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 90000);
    util.getConfiguration().setInt("hbase.lease.recovery.timeout", 10000);
    util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    util.startMiniCluster(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] fam = Bytes.toBytes("fam");
    Admin admin = util.getAdmin();
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).setMaxVersions(1)
        .setBlockCacheEnabled(false).build())
      .build();
    admin.createTable(tableDescriptor);
    // Make a new Configuration so it makes a new connection that has the
    // above configuration on it; else we use the old one w/ 10 as default.
    try (Table table = util.getConnection().getTable(tableName)) {
      // Load some data
      util.loadTable(table, fam, false);
      util.flush();
      util.countRows(table);
      // Kill the DFS cluster
      util.getDFSCluster().shutdownDataNodes();
      try {
        util.countRows(table);
        fail("Did not fail to count after removing data");
      } catch (Exception e) {
        LOG.info("Got expected error", e);
        assertTrue(e.getMessage().contains("Could not seek"));
      }
    }
    // Restart data nodes so that HBase can shut down cleanly.
    util.getDFSCluster().restartDataNodes();
  } finally {
    SingleProcessHBaseCluster cluster = util.getMiniHBaseCluster();
    if (cluster != null) {
      cluster.killAll();
    }
    util.shutdownMiniCluster();
  }
}
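
The try/fail/catch pattern used above for the expected read failure predates JUnit 4.13's assertThrows; with a newer JUnit 4 on the classpath, the same check could be written as the following sketch (not the test's actual code):

// Equivalent expected-failure check using org.junit.Assert.assertThrows:
Exception e = assertThrows(Exception.class, () -> util.countRows(table));
LOG.info("Got expected error", e);
assertTrue(e.getMessage().contains("Could not seek"));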
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestFIFOCompactionPolicy, method getStoreWithName.
private HStore getStoreWithName(TableName tableName) {
  SingleProcessHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
    HRegionServer hrs = rst.getRegionServer();
    for (HRegion region : hrs.getRegions(tableName)) {
      return region.getStores().iterator().next();
    }
  }
  return null;
}
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestCompactionWithThroughputController, method getStoreWithName.
private HStore getStoreWithName(TableName tableName) {
  SingleProcessHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
    HRegionServer hrs = rst.getRegionServer();
    for (Region region : hrs.getRegions(tableName)) {
      return ((HRegion) region).getStores().iterator().next();
    }
  }
  return null;
}
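
The two getStoreWithName helpers above are near-duplicates. A stream-based equivalent with the same first-match-or-null contract (a sketch, not code from either test class) could replace both:

// First store of the first online region of tableName on any region server,
// or null if no region server hosts the table.
private HStore getStoreWithName(TableName tableName) {
  return TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream()
    .map(JVMClusterUtil.RegionServerThread::getRegionServer)
    .flatMap(rs -> rs.getRegions(tableName).stream())
    .map(region -> region.getStores().iterator().next())
    .findFirst()
    .orElse(null);
}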