Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestReplicationChangingPeerRegionservers, method testChangingNumberOfPeerRegionServers.
@Test
public void testChangingNumberOfPeerRegionServers() throws IOException, InterruptedException {
  LOG.info("testChangingNumberOfPeerRegionServers");
  SingleProcessHBaseCluster peerCluster = UTIL2.getMiniHBaseCluster();
  // This test wants two RSs up. We generally run only one, so add another.
  peerCluster.startRegionServer();
  Waiter.waitFor(peerCluster.getConfiguration(), 30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return peerCluster.getLiveRegionServerThreads().size() > 1;
    }
  });
  int numRS = peerCluster.getRegionServerThreads().size();
  doPutTest(Bytes.toBytes(1));
  int rsToStop = peerCluster.getServerWithMeta() == 0 ? 1 : 0;
  peerCluster.stopRegionServer(rsToStop);
  peerCluster.waitOnRegionServer(rsToStop);
  // Sanity check
  assertEquals(numRS - 1, peerCluster.getRegionServerThreads().size());
  doPutTest(Bytes.toBytes(2));
  peerCluster.startRegionServer();
  // Sanity check
  assertEquals(numRS, peerCluster.getRegionServerThreads().size());
  doPutTest(Bytes.toBytes(3));
}
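The Waiter.waitFor call above polls until the predicate holds or the 30-second timeout expires. Since Waiter.Predicate has a single evaluate() method, the same wait can be written with a lambda; a minimal sketch of that pattern (the helper name waitForLiveRegionServers is hypothetical):

private static void waitForLiveRegionServers(SingleProcessHBaseCluster cluster, int expected)
    throws Exception {
  // Waiter.Predicate is a functional interface, so a lambda works in place of the
  // anonymous class used in the test above.
  Waiter.waitFor(cluster.getConfiguration(), 30000,
    () -> cluster.getLiveRegionServerThreads().size() >= expected);
}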
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestMasterObserver, method testSnapshotOperations.
@Test
public void testSnapshotOperations() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
  cp.resetStates();
  // create a table
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build();
  Admin admin = UTIL.getAdmin();
  tableCreationLatch = new CountDownLatch(1);
  admin.createTable(tableDescriptor);
  tableCreationLatch.await();
  tableCreationLatch = new CountDownLatch(1);
  admin.disableTable(tableName);
  assertTrue(admin.isTableDisabled(tableName));
  try {
    // Test snapshot operation
    assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled());
    admin.snapshot(TEST_SNAPSHOT, tableName);
    assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled());
    // Test list operation
    admin.listSnapshots();
    assertTrue("Coprocessor should have been called on snapshot list", cp.wasListSnapshotCalled());
    // Test clone operation
    admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
    assertTrue("Coprocessor should have been called on snapshot clone", cp.wasCloneSnapshotCalled());
    assertFalse("Coprocessor restore should not have been called on snapshot clone",
      cp.wasRestoreSnapshotCalled());
    admin.disableTable(TEST_CLONE);
    assertTrue(admin.isTableDisabled(TEST_CLONE));
    deleteTable(admin, TEST_CLONE);
    // Test restore operation
    cp.resetStates();
    admin.restoreSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot restore", cp.wasRestoreSnapshotCalled());
    assertFalse("Coprocessor clone should not have been called on snapshot restore",
      cp.wasCloneSnapshotCalled());
    admin.deleteSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot delete", cp.wasDeleteSnapshotCalled());
  } finally {
    deleteTable(admin, tableName);
  }
}
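The CPMasterObserver used above records which master coprocessor hooks have fired so the test can assert on them. A minimal sketch of such a flag-recording observer, assuming the standard MasterCoprocessor/MasterObserver interfaces; the class and field names here are illustrative, not the actual CPMasterObserver:

public static class SnapshotTrackingObserver implements MasterCoprocessor, MasterObserver {
  private volatile boolean snapshotCalled;

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
      SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException {
    // Record that the snapshot hook fired so a test can assert on it later.
    snapshotCalled = true;
  }

  public boolean wasSnapshotCalled() {
    return snapshotCalled;
  }
}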
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestMasterCoprocessorExceptionWithAbort, method testExceptionFromCoprocessorWhenCreatingTable.
@Test
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
  assertFalse("No table created yet", cp.wasCreateTableCalled());
  // Set a watch on the zookeeper /hbase/master node. If the master dies,
  // the node will be deleted.
  ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException("Fatal ZK error: " + why, e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
  MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      throw new RuntimeException("Fatal ZK master tracker error, why=" + why, e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
  masterTracker.start();
  zkw.registerListener(masterTracker);
  // Test (part of) the output that should have been printed by the master when it aborts
  // (namely the part that shows the set of loaded coprocessors).
  // In this test, there is only a single coprocessor (BuggyMasterObserver).
  assertTrue(HMaster.getLoadedCoprocessors()
    .contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
  CreateTableThread createTableThread = new CreateTableThread(UTIL);
  // Attempting to create a table (using createTableThread above) triggers an NPE in
  // BuggyMasterObserver. The master will then abort and the /hbase/master zk node will be deleted.
  createTableThread.start();
  // Wait up to 30 seconds for the master's /hbase/master zk node to go away after the master aborts.
  for (int i = 0; i < 30; i++) {
    if (masterTracker.masterZKNodeWasDeleted) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      fail("InterruptedException while waiting for master zk node to be deleted.");
    }
  }
  assertTrue("Master did not abort on the coprocessor exception as expected.",
    masterTracker.masterZKNodeWasDeleted);
  createTableThread.interrupt();
  try {
    createTableThread.join(1000);
  } catch (InterruptedException e) {
    // Ignore InterruptedException while waiting for createTableThread.join().
  }
}
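The MasterTracker above flips a flag once the watched master znode disappears, which is what the polling loop checks. A minimal sketch of such a tracker, assuming it extends HBase's ZKNodeTracker from org.apache.hadoop.hbase.zookeeper (the flag field matches the usage in the test above; the rest is illustrative):

public static class MasterTracker extends ZKNodeTracker {
  public volatile boolean masterZKNodeWasDeleted = false;

  public MasterTracker(ZKWatcher zkw, String masterNode, Abortable abortable) {
    super(zkw, masterNode, abortable);
  }

  @Override
  public void nodeDeleted(String path) {
    // Record the deletion of the watched master znode so the test loop can observe it.
    if (path.equals("/hbase/master")) {
      masterZKNodeWasDeleted = true;
    }
  }
}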
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestClassLoading, method testPrivateClassLoader.
// HBASE-6308: Test that the CP classloader is the CoprocessorClassLoader
@Test
public void testPrivateClassLoader() throws Exception {
  File jarFile = buildCoprocessorJar(cpName4);
  // create a table that references the jar
  TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName4));
  tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
  tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER);
  TableDescriptor tableDescriptor = tdb.build();
  Admin admin = TEST_UTIL.getAdmin();
  admin.createTable(tableDescriptor);
  waitForTable(tableDescriptor.getTableName());
  // verify that the coprocessor was loaded correctly
  boolean found = false;
  SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) {
      Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4);
      if (cp != null) {
        found = true;
        assertEquals("Class " + cpName4 + " was not loaded by CoprocessorClassLoader",
          CoprocessorClassLoader.class, cp.getClass().getClassLoader().getClass());
      }
    }
  }
  assertTrue("Class " + cpName4 + " was missing on a region", found);
}
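The "COPROCESSOR$1" value above encodes a coprocessor spec as "path|class|priority[|key=value,...]". A sketch of the same table definition using the structured CoprocessorDescriptorBuilder API, assuming it is available in the HBase version in use, as an alternative to the raw string attribute:

// A minimal sketch, assuming org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
// same table as above, but the coprocessor spec is built instead of concatenated.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName4))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("test")))
  .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(cpName4)
    .setJarPath(getLocalPath(jarFile))
    .setPriority(Coprocessor.PRIORITY_USER)
    .build())
  .build();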
Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.
The class TestClassLoading, method testClassLoadingFromHDFS.
// HBASE-3516: Test CP class loading from HDFS
@Test
public void testClassLoadingFromHDFS() throws Exception {
  FileSystem fs = cluster.getFileSystem();
  File jarFile1 = buildCoprocessorJar(cpName1);
  File jarFile2 = buildCoprocessorJar(cpName2);
  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(jarFile1.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName();
  Path pathOnHDFS1 = new Path(jarFileOnHDFS1);
  assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1);
  fs.copyFromLocalFile(new Path(jarFile2.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName();
  Path pathOnHDFS2 = new Path(jarFileOnHDFS2);
  assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS2));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2);
  // create a table that references the coprocessors
  TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName);
  tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
  // without configuration values
  tdb.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  tdb.setValue("COPROCESSOR$2", jarFileOnHDFS2 + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  CoprocessorClassLoader.clearCache();
  byte[] startKey = { 10, 63 };
  byte[] endKey = { 12, 43 };
  TableDescriptor tableDescriptor = tdb.build();
  admin.createTable(tableDescriptor, startKey, endKey, 4);
  waitForTable(tableDescriptor.getTableName());
  // verify that the coprocessors were loaded
  boolean foundTableRegion = false;
  boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true;
  Map<Region, Set<ClassLoader>> regionsActiveClassLoaders = new HashMap<>();
  SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      foundTableRegion = true;
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      found1 = found1 && (env != null);
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      found2 = found2 && (env != null);
      if (env != null) {
        Configuration conf = env.getConfiguration();
        found2_k1 = found2_k1 && (conf.get("k1") != null);
        found2_k2 = found2_k2 && (conf.get("k2") != null);
        found2_k3 = found2_k3 && (conf.get("k3") != null);
      } else {
        found2_k1 = false;
        found2_k2 = false;
        found2_k3 = false;
      }
      regionsActiveClassLoaders.put(region,
        ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders());
    }
  }
  assertTrue("No region was found for table " + tableName, foundTableRegion);
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
  // check if CP classloaders are cached
  assertNotNull(jarFileOnHDFS1 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS1));
  assertNotNull(jarFileOnHDFS2 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS2));
  // Two external jars are used, so there should be one classloader per jar.
  assertEquals("The number of cached classloaders should be equal to the number of external jar files",
    2, CoprocessorClassLoader.getAllCached().size());
  // check that region active classloaders are shared across all regions on the RS
  Set<ClassLoader> externalClassLoaders = new HashSet<>(CoprocessorClassLoader.getAllCached());
  for (Map.Entry<Region, Set<ClassLoader>> regionCP : regionsActiveClassLoaders.entrySet()) {
    assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached."
      + " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + regionCP.getValue(),
      externalClassLoaders.containsAll(regionCP.getValue()));
  }
}
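The copy-to-HDFS steps above are duplicated verbatim for each jar. A small sketch of a helper that factors out that pattern, using only the Hadoop FileSystem calls already present in the test (the helper name copyJarToHDFS is hypothetical):

// A minimal sketch; the helper name is hypothetical.
static String copyJarToHDFS(FileSystem fs, File jarFile) throws IOException {
  // Copy the local jar into the filesystem root and return its fully qualified HDFS path.
  fs.copyFromLocalFile(new Path(jarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR));
  return fs.getUri().toString() + Path.SEPARATOR + jarFile.getName();
}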