Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From class TestRegionMover, method testLoadWithoutAck.
/**
 * Test that unloads a region server first and then loads it using no-ack mode.
 * We only check that some regions end up on the region server, since a no-ack load is best effort.
 * @throws Exception
 */
@Test
public void testLoadWithoutAck() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HRegionServer regionServer = cluster.getRegionServer(0);
  String rsName = regionServer.getServerName().getHostname();
  int port = regionServer.getServerName().getPort();
  int noRegions = regionServer.getNumberOfOnlineRegions();
  String rs = rsName + ":" + Integer.toString(port);
  RegionMoverBuilder rmBuilder = new RegionMoverBuilder(rs).ack(true);
  RegionMover rm = rmBuilder.build();
  rm.setConf(TEST_UTIL.getConfiguration());
  LOG.info("Unloading " + rs);
  rm.unload();
  assertEquals(0, regionServer.getNumberOfOnlineRegions());
  LOG.info("Successfully Unloaded\nNow Loading");
  rm = rmBuilder.ack(false).build();
  rm.setConf(TEST_UTIL.getConfiguration());
  rm.load();
  TEST_UTIL.waitFor(5000, 500, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return regionServer.getNumberOfOnlineRegions() > 0;
    }
  });
}
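Both RegionMover tests rely on a class-level HBaseTestingUtility named TEST_UTIL that is already running a MiniHBaseCluster; that setup is not part of the snippet. A minimal sketch of the assumed scaffolding (imports omitted, as in the snippets above; the region server count is illustrative) might look like:

// Assumed test scaffolding, not copied from the HBase source: a shared
// HBaseTestingUtility that starts and stops an in-process MiniHBaseCluster.
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start a mini cluster with a few region servers so regions can be moved between them.
  TEST_UTIL.startMiniCluster(3);
}

@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
}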
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From class TestRegionMover, method testLoadWithAck.
@Test
public void testLoadWithAck() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HRegionServer regionServer = cluster.getRegionServer(0);
  String rsName = regionServer.getServerName().getHostname();
  int port = regionServer.getServerName().getPort();
  int noRegions = regionServer.getNumberOfOnlineRegions();
  String rs = rsName + ":" + Integer.toString(port);
  RegionMoverBuilder rmBuilder = new RegionMoverBuilder(rs).ack(true).maxthreads(8);
  RegionMover rm = rmBuilder.build();
  rm.setConf(TEST_UTIL.getConfiguration());
  LOG.info("Unloading " + rs);
  rm.unload();
  assertEquals(0, regionServer.getNumberOfOnlineRegions());
  LOG.info("Successfully Unloaded\nNow Loading");
  rm.load();
  assertEquals(noRegions, regionServer.getNumberOfOnlineRegions());
}
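The difference between the two tests is the ack flag: with ack(true) each move is confirmed before unload() and load() return, which is why this test can assert the exact region count immediately, while the no-ack variant only checks that some regions arrived. A hedged sketch of the same builder API used outside a test (the host:port string and the helper method name are illustrative; imports and error handling are omitted):

// Illustrative helper, not from the HBase source: drain a region server in ack mode,
// do maintenance, then move its regions back.
void drainAndRestore(String rsHostAndPort) throws Exception {
  RegionMoverBuilder builder = new RegionMoverBuilder(rsHostAndPort).ack(true).maxthreads(8);
  RegionMover mover = builder.build();
  mover.setConf(HBaseConfiguration.create());
  // Blocks until every region has been confirmed moved off the server.
  mover.unload();
  // ... perform maintenance on the region server here ...
  // Moves the previously hosted regions back, again waiting for acknowledgements.
  mover.load();
}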
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From class TestHBaseFsckOneRS, method testRegionShouldNotBeDeployed.
/**
 * The region is not deployed when the table is disabled.
 */
@Test(timeout = 180000)
public void testRegionShouldNotBeDeployed() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    LOG.info("Starting testRegionShouldNotBeDeployed.");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
        Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
    HTableDescriptor htdDisabled = new HTableDescriptor(tableName);
    htdDisabled.addFamily(new HColumnDescriptor(FAM));
    // Write the .tableinfo
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    fstd.createTableDescriptor(htdDisabled);
    List<HRegionInfo> disabledRegions =
        TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS);
    // Let's just assign everything to the first RS
    HRegionServer hrs = cluster.getRegionServer(0);
    // Create region files.
    admin.disableTable(tableName);
    admin.enableTable(tableName);
    // Disable the table and close its regions
    admin.disableTable(tableName);
    HRegionInfo region = disabledRegions.remove(0);
    byte[] regionName = region.getRegionName();
    // The region should not be assigned currently
    assertTrue(cluster.getServerWith(regionName) == -1);
    // Directly open a region on a region server.
    // If going through AM/ZK, the region won't be opened.
    // Even if it were opened, AM would close it, which causes
    // flakiness of this test.
    HRegion r = HRegion.openHRegion(region, htdDisabled, hrs.getWAL(region), conf);
    hrs.addToOnlineRegions(r);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
        HBaseFsck.ErrorReporter.ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });
    // fix this fault
    doFsck(conf, true);
    // check result
    assertNoErrors(doFsck(conf, false));
  } finally {
    admin.enableTable(tableName);
    cleanupTable(tableName);
  }
}
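Note that doFsck, assertErrors, and assertNoErrors come from the HbckTestingUtil test helper rather than from HBaseFsck itself. A rough, hedged sketch of the pattern that helper wraps; this is a simplification for illustration, not the helper's actual source:

// Simplified sketch of running hbck programmatically and inspecting its findings.
HBaseFsck fsck = new HBaseFsck(conf);
fsck.connect();
fsck.setFixAssignments(false);   // report only; true corresponds to doFsck(conf, true) above
fsck.onlineHbck();
// Inspect the error codes hbck reported, much like assertErrors does in the test helper.
List<HBaseFsck.ErrorReporter.ERROR_CODE> found = fsck.getErrors().getErrorList();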
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From class TestMasterCoprocessorExceptionWithAbort, method testExceptionFromCoprocessorWhenCreatingTable.
@Test(timeout = 30000)
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  BuggyMasterObserver cp =
      (BuggyMasterObserver) host.findCoprocessor(BuggyMasterObserver.class.getName());
  assertFalse("No table created yet", cp.wasCreateTableCalled());
  // Set a watch on the zookeeper /hbase/master node. If the master dies,
  // the node will be deleted.
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException("Fatal ZK error: " + why, e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
  MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException("Fatal ZK master tracker error, why=", e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
  masterTracker.start();
  zkw.registerListener(masterTracker);
  // Test (part of) the output that should have been printed by the master when it aborts
  // (namely the part that shows the set of loaded coprocessors).
  // In this test, there is only a single coprocessor (BuggyMasterObserver).
  assertTrue(HMaster.getLoadedCoprocessors()
      .contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
  CreateTableThread createTableThread = new CreateTableThread(UTIL);
  // Attempting to create a table (using createTableThread above) triggers an NPE in
  // BuggyMasterObserver. The master will then abort and the /hbase/master zk node will be deleted.
  createTableThread.start();
  // Wait up to 30 seconds for the master's /hbase/master zk node to go away after the master aborts.
  for (int i = 0; i < 30; i++) {
    if (masterTracker.masterZKNodeWasDeleted == true) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      fail("InterruptedException while waiting for master zk node to " + "be deleted.");
    }
  }
  assertTrue("Master aborted on coprocessor exception, as expected.",
      masterTracker.masterZKNodeWasDeleted);
  createTableThread.interrupt();
  try {
    createTableThread.join(1000);
  } catch (InterruptedException e) {
    assertTrue("Ignoring InterruptedException while waiting for " + " createTableThread.join().", true);
  }
}
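The master only aborts here because the cluster is configured to treat coprocessor exceptions as fatal; that setup happens outside this snippet. A hedged sketch of the configuration this test class presumably applies before starting the mini cluster (the exact setup code is not shown in the snippet):

// Assumed @BeforeClass-style setup, reconstructed for context.
Configuration conf = UTIL.getConfiguration();
// Load the buggy observer on the master.
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, BuggyMasterObserver.class.getName());
// With abort-on-error enabled, an unhandled coprocessor exception brings the master down.
conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
UTIL.startMiniCluster();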
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
From class TestMasterObserver, method testSnapshotOperations.
@Test(timeout = 180000)
public void testSnapshotOperations() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
  cp.resetStates();
  // create a table
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  Admin admin = UTIL.getAdmin();
  tableCreationLatch = new CountDownLatch(1);
  admin.createTable(htd);
  tableCreationLatch.await();
  tableCreationLatch = new CountDownLatch(1);
  admin.disableTable(tableName);
  assertTrue(admin.isTableDisabled(tableName));
  try {
    // Test snapshot operation
    assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled());
    admin.snapshot(TEST_SNAPSHOT, tableName);
    assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled());
    // Test list operation
    admin.listSnapshots();
    assertTrue("Coprocessor should have been called on snapshot list", cp.wasListSnapshotCalled());
    // Test clone operation
    admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
    assertTrue("Coprocessor should have been called on snapshot clone", cp.wasCloneSnapshotCalled());
    assertFalse("Coprocessor restore should not have been called on snapshot clone",
        cp.wasRestoreSnapshotCalled());
    admin.disableTable(TEST_CLONE);
    assertTrue(admin.isTableDisabled(tableName));
    deleteTable(admin, TEST_CLONE);
    // Test restore operation
    cp.resetStates();
    admin.restoreSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot restore", cp.wasRestoreSnapshotCalled());
    assertFalse("Coprocessor clone should not have been called on snapshot restore",
        cp.wasCloneSnapshotCalled());
    admin.deleteSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot delete", cp.wasDeleteSnapshotCalled());
  } finally {
    deleteTable(admin, tableName);
  }
}
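TEST_FAMILY, TEST_SNAPSHOT, TEST_CLONE, tableCreationLatch, and the registration of CPMasterObserver are all defined elsewhere in TestMasterObserver. A hedged sketch of what those assumed definitions could look like (the values are illustrative, not the ones used by the real test):

// Assumed class-level members, reconstructed for context; values are illustrative.
private static final byte[] TEST_FAMILY = Bytes.toBytes("fam1");
private static final String TEST_SNAPSHOT = "observed_snapshot";
private static final TableName TEST_CLONE = TableName.valueOf("observed_clone");
// Counted down by the observer when table creation completes (hook not shown here).
private static volatile CountDownLatch tableCreationLatch;

@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // Register the observer on the master so its pre/post hooks fire for DDL and snapshot operations.
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, CPMasterObserver.class.getName());
  UTIL.startMiniCluster();
}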