Example usage of org.apache.hadoop.hbase.SingleProcessHBaseCluster in the Apache HBase project: class TestClassLoading, method testClassLoadingFromLocalFS.
// HBASE-3516: Test CP class loading from the local file system.
// (Comment moved out of the method signature — it previously sat between
// the `public` modifier and the `void` return type.)
@Test
public void testClassLoadingFromLocalFS() throws Exception {
  File jarFile = buildCoprocessorJar(cpName3);
  // Create a table whose descriptor references the coprocessor jar on the local FS.
  TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName3));
  tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
  // Coprocessor attribute format: <jar path>|<class name>|<priority>
  tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER);
  TableDescriptor tableDescriptor = tdb.build();
  Admin admin = TEST_UTIL.getAdmin();
  admin.createTable(tableDescriptor);
  waitForTable(tableDescriptor.getTableName());
  // Verify that the coprocessor was loaded on the region(s) of the new table.
  // NOTE(review): `found` is overwritten per matching region, so effectively only
  // the last matching region is checked — presumably there is a single region here.
  boolean found = false;
  SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) {
      found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
    }
  }
  assertTrue("Class " + cpName3 + " was missing on a region", found);
}
Example usage of org.apache.hadoop.hbase.SingleProcessHBaseCluster in the Apache HBase project: class TestClassLoading, method loadingClassFromLibDirInJar.
/**
 * Packages two coprocessor classes inside a lib directory of an outer jar, copies
 * that jar to HDFS, creates a table referencing both coprocessors, and verifies
 * that each one (including its per-coprocessor configuration) is loaded on the
 * region server hosting the table's regions.
 * @param libPrefix directory prefix inside the outer jar under which the inner jars are placed
 */
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  // Build the two inner coprocessor jars and wrap them in an outer jar under libPrefix.
  File firstInnerJar = buildCoprocessorJar(cpName1);
  File secondInnerJar = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
  ClassLoaderTestHelper.addJarFilesToJar(outerJarFile, libPrefix, firstInnerJar, secondInnerJar);
  // Push the outer jar into DFS so the region servers can fetch it.
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
  // Build a table descriptor that references both coprocessors.
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
  builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
  // First coprocessor: no configuration values.
  builder.setValue("COPROCESSOR$1", jarFileOnHDFS + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER);
  // Second coprocessor: with configuration values appended as k=v pairs.
  builder.setValue("COPROCESSOR$2", jarFileOnHDFS + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getAdmin();
  // Drop any leftover table from a previous run before creating a fresh one.
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  TableDescriptor tableDescriptor = builder.build();
  admin.createTable(tableDescriptor);
  waitForTable(tableDescriptor.getTableName());
  // Verify both coprocessors came online, and that cp2's configuration keys are visible.
  boolean cp1Loaded = false;
  boolean cp2Loaded = false;
  boolean sawK1 = false;
  boolean sawK2 = false;
  boolean sawK3 = false;
  SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment cpEnv = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (cpEnv != null) {
        cp1Loaded = true;
      }
      cpEnv = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (cpEnv != null) {
        cp2Loaded = true;
        Configuration cpConf = cpEnv.getConfiguration();
        sawK1 = cpConf.get("k1") != null;
        sawK2 = cpConf.get("k2") != null;
        sawK3 = cpConf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", cp1Loaded);
  assertTrue("Class " + cpName2 + " was missing on a region", cp2Loaded);
  assertTrue("Configuration key 'k1' was missing on a region", sawK1);
  assertTrue("Configuration key 'k2' was missing on a region", sawK2);
  assertTrue("Configuration key 'k3' was missing on a region", sawK3);
}
Example usage of org.apache.hadoop.hbase.SingleProcessHBaseCluster in the Apache HBase project: class TestFromClientSide4, method testUnmanagedHConnectionReconnect.
/**
 * Tests that unmanaged HConnections are able to reconnect
 * properly (see HBASE-5058).
 */
@Test
public void testUnmanagedHConnectionReconnect() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Wildcard type instead of a raw Class (the raw type triggers an unchecked warning).
  Class<?> registryImpl = conf.getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class);
  // This test does not make sense for MasterRegistry since it stops the only master in the
  // cluster and starts a new master without populating the underlying config for the connection.
  Assume.assumeFalse(registryImpl.equals(MasterRegistry.class));
  final TableName tableName = name.getTableName();
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    // Sanity check: the table is visible and empty through the unmanaged connection.
    try (Table t = conn.getTable(tableName);
        Admin admin = conn.getAdmin()) {
      assertTrue(admin.tableExists(tableName));
      assertTrue(t.get(new Get(ROW)).isEmpty());
    }
    // Stop the (only) master and wait for it to go down.
    SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    cluster.stopMaster(0, false);
    cluster.waitOnMaster(0);
    // Start up a new master and wait for it to become active.
    cluster.startMaster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    // The existing connection's Admin must be able to reach the new master.
    try (Admin admin = conn.getAdmin()) {
      assertTrue(admin.tableExists(tableName));
      // assertEquals takes (expected, actual): SLAVES is the expected live-server count.
      assertEquals(SLAVES, admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().size());
    }
  }
}
Example usage of org.apache.hadoop.hbase.SingleProcessHBaseCluster in the Apache HBase project: class TestRegionPlacement, method verifyRegionServerUpdated.
/**
* Verify all the online region servers has been updated to the
* latest assignment plan
* @param plan
* @throws IOException
*/
/**
 * Verifies that all online region servers have been updated with the
 * latest favored-nodes assignment plan.
 * @param plan the favored-nodes plan that every region server should reflect
 * @throws IOException if reading region state from a region server fails
 */
private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException {
  // Verify all region servers contain the correct favored nodes information.
  SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  for (int i = 0; i < SLAVES; i++) {
    HRegionServer rs = cluster.getRegionServer(i);
    for (Region region : rs.getRegions(TableName.valueOf("testRegionAssignment"))) {
      InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(region.getRegionInfo().getEncodedName());
      String regionName = region.getRegionInfo().getRegionNameAsString();
      List<ServerName> favoredServerList = plan.getAssignmentMap().get(regionName);
      // Regions absent from the plan should be system regions (hbase:meta / ROOT)
      // which carry no favored nodes.
      if (favoredServerList == null) {
        TableDescriptor desc = region.getTableDescriptor();
        assertNull(favoredSocketAddress);
        assertTrue("User region " + region.getTableDescriptor().getTableName() + " should have favored nodes", desc.isMetaRegion());
      } else {
        // For a user region, the favored nodes cached on the region server must be
        // identical to the favored nodes in the assignment plan.
        // Messages added so a failure reports which region diverged.
        assertTrue("Region " + regionName + ": favored node count " + favoredSocketAddress.length + " does not match plan size " + favoredServerList.size(), favoredSocketAddress.length == favoredServerList.size());
        assertTrue("Region " + regionName + ": favored server list in plan is empty", !favoredServerList.isEmpty());
        for (int j = 0; j < favoredServerList.size(); j++) {
          InetSocketAddress addrFromRS = favoredSocketAddress[j];
          InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved(favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort());
          assertNotNull(addrFromRS);
          assertNotNull(addrFromPlan);
          assertTrue("Region server " + rs.getServerName().getAddress() + " has the " + positions[j] + " for region " + region.getRegionInfo().getRegionNameAsString() + " is " + addrFromRS + " which is inconsistent with the plan " + addrFromPlan, addrFromRS.equals(addrFromPlan));
        }
      }
    }
  }
}
Example usage of org.apache.hadoop.hbase.SingleProcessHBaseCluster in the Apache HBase project: class TestRollingRestart, method testBasicRollingRestart.
@Test
public void testBasicRollingRestart() throws Exception {
// Start a cluster with 2 masters and 3 region servers (a fourth RS is added later).
final int NUM_MASTERS = 2;
final int NUM_RS = 3;
final int NUM_REGIONS_TO_CREATE = 20;
int expectedNumRS = 3;
// Start the cluster
log("Starting cluster");
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, splitWALCoordinatedByZK);
TEST_UTIL = new HBaseTestingUtil(conf);
StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
log("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();
// Create a table with regions; brackets are stripped from the method name so it is
// a legal table name even under parameterized runs.
final TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[\\[|\\]]", "-"));
byte[] family = Bytes.toBytes("family");
log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
int numRegions = -1;
try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
numRegions = r.getStartKeys().length;
}
// Account for the catalog (hbase:meta) region in the expected total.
numRegions += 1;
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Disabling table\n");
TEST_UTIL.getAdmin().disableTable(tableName);
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
NavigableSet<String> regions = HBaseTestingUtil.getAllOnlineRegions(cluster);
// With the table disabled, only the catalog region should remain online.
log("Verifying only catalog region is assigned\n");
if (regions.size() != 1) {
for (String oregion : regions) {
log("Region still online: " + oregion);
}
}
assertEquals(1, regions.size());
log("Enabling table\n");
TEST_UTIL.getAdmin().enableTable(tableName);
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster\n");
regions = HBaseTestingUtil.getAllOnlineRegions(cluster);
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
// Add a new regionserver
log("Adding a fourth RS");
RegionServerThread restarted = cluster.startRegionServer();
expectedNumRS++;
restarted.waitForServerOnline();
log("Additional RS is online");
log("Waiting for no more RIT");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
// Master restarts: identify active vs backup before bouncing each one.
List<MasterThread> masterThreads = cluster.getMasterThreads();
MasterThread activeMaster = null;
MasterThread backupMaster = null;
assertEquals(2, masterThreads.size());
if (masterThreads.get(0).getMaster().isActiveMaster()) {
activeMaster = masterThreads.get(0);
backupMaster = masterThreads.get(1);
} else {
activeMaster = masterThreads.get(1);
backupMaster = masterThreads.get(0);
}
// Bring down the backup master first so the active master keeps serving.
log("Stopping backup master\n\n");
backupMaster.getMaster().stop("Stop of backup during rolling restart");
cluster.hbaseCluster.waitOnMaster(backupMaster);
// Bring down the primary master
log("Stopping primary master\n\n");
activeMaster.getMaster().stop("Stop of active during rolling restart");
cluster.hbaseCluster.waitOnMaster(activeMaster);
// Start primary master
log("Restarting primary master\n\n");
activeMaster = cluster.startMaster();
cluster.waitForActiveAndReadyMaster();
// Start backup master
log("Restarting backup master\n\n");
backupMaster = cluster.startMaster();
// Region server count must be unchanged by the master restarts.
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
// RegionServer Restarts
// Bring them down, one at a time, waiting between each to complete
List<RegionServerThread> regionServers = cluster.getLiveRegionServerThreads();
int num = 1;
int total = regionServers.size();
for (RegionServerThread rst : regionServers) {
ServerName serverName = rst.getRegionServer().getServerName();
log("Stopping region server " + num + " of " + total + " [ " + serverName + "]");
rst.getRegionServer().stop("Stopping RS during rolling restart");
cluster.hbaseCluster.waitOnRegionServer(rst);
// The master must process the RS shutdown (reassigning its regions) before we
// can meaningfully wait for regions-in-transition to drain.
log("Waiting for RS shutdown to be handled by master");
waitForRSShutdownToStartAndFinish(activeMaster, serverName);
log("RS shutdown done, waiting for no more RIT");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
expectedNumRS--;
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
log("Restarting region server " + num + " of " + total);
restarted = cluster.startRegionServer();
restarted.waitForServerOnline();
expectedNumRS++;
log("Region server " + num + " is back online");
log("Waiting for no more RIT");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
num++;
}
// NOTE(review): fixed sleep — presumably gives the cluster a moment to settle
// before the final assignment check; confirm whether a condition-wait would do.
Thread.sleep(1000);
assertRegionsAssigned(cluster, regions);
// TODO: Bring random 3 of 4 RS down at the same time
ht.close();
// Stop the cluster
TEST_UTIL.shutdownMiniCluster();
}
Aggregations