Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
The class TestRSGroupsOfflineMode, method testOffline:
@Test
public void testOffline() throws Exception, InterruptedException {
// Table name should sort after the group table name so the table gets assigned later.
final TableName failoverTable = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f"));
final HRegionServer killRS = ((MiniHBaseCluster) cluster).getRegionServer(0);
final HRegionServer groupRS = ((MiniHBaseCluster) cluster).getRegionServer(1);
final HRegionServer failoverRS = ((MiniHBaseCluster) cluster).getRegionServer(2);
String newGroup = "my_group";
RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection());
groupAdmin.addRSGroup(newGroup);
if (master.getAssignmentManager().getRegionStates().getRegionAssignments().containsValue(failoverRS.getServerName())) {
for (HRegionInfo regionInfo : hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) {
hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(failoverRS.getServerName().getServerName()));
}
LOG.info("Waiting for region unassignments on failover RS...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return master.getServerManager().getLoad(failoverRS.getServerName()).getRegionsLoad().size() > 0;
}
});
}
// Move server to group and make sure all tables are assigned.
groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getAddress()), newGroup);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return groupRS.getNumberOfOnlineRegions() < 1 && master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1;
}
});
// Move table to group and wait.
groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup);
LOG.info("Waiting for move table...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return groupRS.getNumberOfOnlineRegions() == 1;
}
});
groupRS.stop("die");
// Race condition here.
TEST_UTIL.getHBaseCluster().getMaster().stopMaster();
LOG.info("Waiting for offline mode...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return TEST_UTIL.getHBaseCluster().getMaster() != null && TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() && TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() <= 3;
}
});
// Get groupInfoManager from the new active master.
RSGroupInfoManager groupMgr = ((MiniHBaseCluster) cluster).getMaster().getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0).getGroupInfoManager();
// Make sure balancer is in offline mode, since this is what we're testing.
assertFalse(groupMgr.isOnline());
// Verify the group affiliation that's loaded from ZK instead of tables.
assertEquals(newGroup, groupMgr.getRSGroupOfTable(RSGroupInfoManager.RSGROUP_TABLE_NAME));
assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable));
// Kill the final regionserver to verify that failover happens for all tables except the GROUP
// table, since its group does not have any online RS.
killRS.stop("die");
master = TEST_UTIL.getHBaseCluster().getMaster();
LOG.info("Waiting for new table assignment...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return failoverRS.getOnlineRegions(failoverTable).size() >= 1;
}
});
Assert.assertEquals(0, failoverRS.getOnlineRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size());
// Need this for minicluster to shutdown cleanly.
master.stopMaster();
}
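The test above leans on MiniHBaseCluster to hand out direct references to the master and individual region servers. As a minimal, self-contained sketch of that setup pattern (the class name and cluster sizing below are illustrative, not taken from the test):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

public class MiniHBaseClusterSketch {
  public static void main(String[] args) throws Exception {
    // Start an in-process cluster with 1 master and 3 region servers.
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    testUtil.startMiniCluster(1, 3);
    MiniHBaseCluster cluster = testUtil.getHBaseCluster();

    // Grab daemon handles, the same way the test binds killRS/groupRS/failoverRS above.
    HMaster master = cluster.getMaster();
    HRegionServer rs = cluster.getRegionServer(0);
    System.out.println("active master: " + master.getServerName());
    System.out.println("region server 0: " + rs.getServerName());

    testUtil.shutdownMiniCluster();
  }
}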
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
The class TestAdmin2, method testMoveToPreviouslyAssignedRS:
@Test(timeout = 300000)
public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException {
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
HMaster master = cluster.getMaster();
final TableName tableName = TableName.valueOf(name.getMethodName());
Admin localAdmin = createTable(tableName);
List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
HRegionInfo hri = tableRegions.get(0);
AssignmentManager am = master.getAssignmentManager();
assertTrue("Region " + hri.getRegionNameAsString() + " should be assigned properly", am.waitForAssignment(hri));
ServerName server = am.getRegionStates().getRegionServerOfRegion(hri);
localAdmin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(server.getServerName()));
assertEquals("Current region server and region server before move should be same.", server, am.getRegionStates().getRegionServerOfRegion(hri));
}
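The test above deliberately moves a region onto the server it already lives on and asserts nothing changes. For contrast, a hedged sketch of moving a region and waiting for it to settle, assuming the same kind of running TEST_UTIL mini cluster and an existing table (the table name is made up for illustration):

MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
Admin admin = TEST_UTIL.getAdmin();
TableName tn = TableName.valueOf("illustrative_table");
HRegionInfo region = admin.getTableRegions(tn).get(0);

// Pick a live region server as the destination; a real test would skip the current host.
ServerName dest = cluster.getRegionServer(0).getServerName();
admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(dest.getServerName()));

// Block until every region of the table is assigned again before asserting on its location.
TEST_UTIL.waitUntilAllRegionsAssigned(tn);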
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
The class TestEnableTable, method testEnableTableWithNoRegionServers:
@Test(timeout = 300000)
public void testEnableTableWithNoRegionServers() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
final HMaster m = cluster.getMaster();
final Admin admin = TEST_UTIL.getAdmin();
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILYNAME));
admin.createTable(desc);
admin.disableTable(tableName);
TEST_UTIL.waitTableDisabled(tableName.getName());
admin.enableTable(tableName);
TEST_UTIL.waitTableEnabled(tableName);
// disable once more
admin.disableTable(tableName);
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
// now stop region servers
JVMClusterUtil.RegionServerThread rs = cluster.getRegionServerThreads().get(0);
rs.getRegionServer().stop("stop");
cluster.waitForRegionServerToStop(rs.getRegionServer().getServerName(), 10000);
LOG.debug("Now enabling table " + tableName);
admin.enableTable(tableName);
assertTrue(admin.isTableEnabled(tableName));
JVMClusterUtil.RegionServerThread rs2 = cluster.startRegionServer();
cluster.waitForRegionServerToStart(rs2.getRegionServer().getServerName().getHostname(), rs2.getRegionServer().getServerName().getPort(), 60000);
List<HRegionInfo> regions = TEST_UTIL.getAdmin().getTableRegions(tableName);
assertEquals(1, regions.size());
for (HRegionInfo region : regions) {
TEST_UTIL.getAdmin().assign(region.getEncodedNameAsBytes());
}
LOG.debug("Waiting for table assigned " + tableName);
TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
List<HRegionInfo> onlineRegions = admin.getOnlineRegions(rs2.getRegionServer().getServerName());
ArrayList<HRegionInfo> tableRegions = filterTableRegions(tableName, onlineRegions);
assertEquals(1, tableRegions.size());
}
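The stop-a-server / start-a-replacement choreography above is a common MiniHBaseCluster pattern on its own. A minimal sketch, assuming a running TEST_UTIL mini cluster (variable names and the timeout are illustrative):

MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

// Stop region server 0 and wait until the mini cluster reports it as stopped.
JVMClusterUtil.RegionServerThread stopped = cluster.getRegionServerThreads().get(0);
ServerName stoppedName = stopped.getRegionServer().getServerName();
stopped.getRegionServer().stop("stopping for test");
cluster.waitForRegionServerToStop(stoppedName, 10000);

// Start a replacement and wait for it to come online before reassigning regions to it.
JVMClusterUtil.RegionServerThread replacement = cluster.startRegionServer();
replacement.waitForServerOnline();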
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
The class TestHTableMultiplexerFlushCache, method testOnRegionMove:
@Test
public void testOnRegionMove() throws Exception {
// This test does nearly the same thing as testOnRegionChange, but avoids the
// potential to get a ConnectionClosingException. By moving the region, we can be certain that
// the connection is still valid and that the implementation is correctly handling an invalid
// Region cache (and not just tearing down the entire connection).
final TableName tableName = TableName.valueOf(name.getMethodName());
final int NUM_REGIONS = 10;
Table htable = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 3, Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), NUM_REGIONS);
HTableMultiplexer multiplexer = new HTableMultiplexer(TEST_UTIL.getConfiguration(), PER_REGIONSERVER_QUEUE_SIZE);
final RegionLocator regionLocator = TEST_UTIL.getConnection().getRegionLocator(tableName);
Pair<byte[][], byte[][]> startEndRows = regionLocator.getStartEndKeys();
byte[] row = startEndRows.getFirst()[1];
assertTrue("2nd region should not start with empty row", row != null && row.length > 0);
Put put = new Put(row).addColumn(FAMILY, QUALIFIER1, VALUE1);
assertTrue("multiplexer.put returns", multiplexer.put(tableName, put));
checkExistence(htable, row, FAMILY, QUALIFIER1, VALUE1);
final HRegionLocation loc = regionLocator.getRegionLocation(row);
final MiniHBaseCluster hbaseCluster = TEST_UTIL.getHBaseCluster();
// The current server for the region we're writing to
final ServerName originalServer = loc.getServerName();
ServerName newServer = null;
// Find a new server to move that region to
for (int i = 0; i < SLAVES; i++) {
HRegionServer rs = hbaseCluster.getRegionServer(i);
if (!rs.getServerName().equals(originalServer)) {
newServer = rs.getServerName();
break;
}
}
assertNotNull("Did not find a new RegionServer to use", newServer);
// Move the region
LOG.info("Moving " + loc.getRegionInfo().getEncodedName() + " from " + originalServer + " to " + newServer);
TEST_UTIL.getAdmin().move(loc.getRegionInfo().getEncodedNameAsBytes(), Bytes.toBytes(newServer.getServerName()));
TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
// Send a new Put
put = new Put(row).addColumn(FAMILY, QUALIFIER2, VALUE2);
assertTrue("multiplexer.put returns", multiplexer.put(tableName, put));
// We should see the update make it to the new server eventually
checkExistence(htable, row, FAMILY, QUALIFIER2, VALUE2);
}
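Because HTableMultiplexer only queues mutations and flushes them asynchronously, the test polls for visibility (checkExistence) rather than asserting right after put(). A minimal sketch of that flow, assuming a running TEST_UTIL cluster and an existing table with family f (the table name, row, and queue size are illustrative):

// 1000 is an arbitrary per-region-server queue size chosen for illustration.
HTableMultiplexer mux = new HTableMultiplexer(TEST_UTIL.getConfiguration(), 1000);
TableName tn = TableName.valueOf("illustrative_table");
Put put = new Put(Bytes.toBytes("row-1"))
    .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));

// put() only enqueues the mutation; false means the per-region-server queue was full.
boolean queued = mux.put(tn, put);
System.out.println("queued = " + queued);

// The flush happens on a background thread, so a reader must retry until the cell
// appears, which is what checkExistence(...) in the test above does.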
Use of org.apache.hadoop.hbase.MiniHBaseCluster in project hbase by apache.
The class TestRollingRestart, method testBasicRollingRestart:
@Test(timeout = 500000)
public void testBasicRollingRestart() throws Exception {
// Start a cluster with 2 masters and 3 regionservers
final int NUM_MASTERS = 2;
final int NUM_RS = 3;
final int NUM_REGIONS_TO_CREATE = 20;
int expectedNumRS = 3;
// Start the cluster
log("Starting cluster");
Configuration conf = HBaseConfiguration.create();
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
log("Waiting for active/ready master");
cluster.waitForActiveAndReadyMaster();
// Create a table with regions
final TableName tableName = TableName.valueOf(name.getMethodName());
byte[] family = Bytes.toBytes("family");
log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
int numRegions = -1;
try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
numRegions = r.getStartKeys().length;
}
// catalogs
numRegions += 1;
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Disabling table\n");
TEST_UTIL.getAdmin().disableTable(tableName);
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
log("Verifying only catalog and namespace regions are assigned\n");
if (regions.size() != 2) {
for (String oregion : regions) log("Region still online: " + oregion);
}
assertEquals(2, regions.size());
log("Enabling table\n");
TEST_UTIL.getAdmin().enableTable(tableName);
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster\n");
regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
// Add a new regionserver
log("Adding a fourth RS");
RegionServerThread restarted = cluster.startRegionServer();
expectedNumRS++;
restarted.waitForServerOnline();
log("Additional RS is online");
log("Waiting for no more RIT");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
// Master Restarts
List<MasterThread> masterThreads = cluster.getMasterThreads();
MasterThread activeMaster = null;
MasterThread backupMaster = null;
assertEquals(2, masterThreads.size());
if (masterThreads.get(0).getMaster().isActiveMaster()) {
activeMaster = masterThreads.get(0);
backupMaster = masterThreads.get(1);
} else {
activeMaster = masterThreads.get(1);
backupMaster = masterThreads.get(0);
}
// Bring down the backup master
log("Stopping backup master\n\n");
backupMaster.getMaster().stop("Stop of backup during rolling restart");
cluster.hbaseCluster.waitOnMaster(backupMaster);
// Bring down the primary master
log("Stopping primary master\n\n");
activeMaster.getMaster().stop("Stop of active during rolling restart");
cluster.hbaseCluster.waitOnMaster(activeMaster);
// Start primary master
log("Restarting primary master\n\n");
activeMaster = cluster.startMaster();
cluster.waitForActiveAndReadyMaster();
// Start backup master
log("Restarting backup master\n\n");
backupMaster = cluster.startMaster();
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
// RegionServer Restarts
// Bring them down, one at a time, waiting between each to complete
List<RegionServerThread> regionServers = cluster.getLiveRegionServerThreads();
int num = 1;
int total = regionServers.size();
for (RegionServerThread rst : regionServers) {
ServerName serverName = rst.getRegionServer().getServerName();
log("Stopping region server " + num + " of " + total + " [ " + serverName + "]");
rst.getRegionServer().stop("Stopping RS during rolling restart");
cluster.hbaseCluster.waitOnRegionServer(rst);
log("Waiting for RS shutdown to be handled by master");
waitForRSShutdownToStartAndFinish(activeMaster, serverName);
log("RS shutdown done, waiting for no more RIT");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
expectedNumRS--;
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
log("Restarting region server " + num + " of " + total);
restarted = cluster.startRegionServer();
restarted.waitForServerOnline();
expectedNumRS++;
log("Region server " + num + " is back online");
log("Waiting for no more RIT");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
num++;
}
Thread.sleep(1000);
assertRegionsAssigned(cluster, regions);
// TODO: Bring random 3 of 4 RS down at the same time
ht.close();
// Stop the cluster
TEST_UTIL.shutdownMiniCluster();
}
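The per-server loop in the middle of that test is essentially a rolling-restart primitive. A compressed, hedged sketch of the same cycle against a running TEST_UTIL mini cluster (timeouts are illustrative, and the master-side shutdown handling the test waits for is omitted here):

MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
// Restart each live region server in turn: stop it, wait for it to go down, start a
// replacement, and let regions settle before moving on to the next one.
for (JVMClusterUtil.RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
  ServerName sn = rst.getRegionServer().getServerName();
  rst.getRegionServer().stop("rolling restart");
  cluster.waitForRegionServerToStop(sn, 60000);
  JVMClusterUtil.RegionServerThread replacement = cluster.startRegionServer();
  replacement.waitForServerOnline();
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
}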