Use of org.apache.hadoop.hbase.HBaseTestingUtil in project hbase by apache.
The class FlushRandomRegionOfTableAction, method perform:
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  getLogger().info("Performing action: Flush random region of table " + tableName);
  List<RegionInfo> regions = admin.getRegions(tableName);
  if (regions == null || regions.isEmpty()) {
    getLogger().info("Table " + tableName + " doesn't have regions to flush");
    return;
  }
  RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(regions.toArray(new RegionInfo[0]));
  getLogger().debug("Flushing region " + region.getRegionNameAsString());
  try {
    admin.flushRegion(region.getRegionName());
  } catch (Exception ex) {
    getLogger().warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
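
The PolicyBasedChaosMonkey.selectRandomItem helper used above is not shown on this page. A minimal sketch of what it does, assuming a uniform random pick (the real HBase implementation may differ in details):

import java.util.concurrent.ThreadLocalRandom;

public final class RandomSelection {
  // Minimal sketch of PolicyBasedChaosMonkey.selectRandomItem: a uniform
  // random pick from a non-empty array. The real HBase helper may differ.
  public static <T> T selectRandomItem(T[] items) {
    return items[ThreadLocalRandom.current().nextInt(items.length)];
  }
}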
The class MoveRandomRegionOfTableAction, method perform:
@Override
public void perform() throws Exception {
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  getLogger().info("Performing action: Move random region of table " + tableName);
  List<RegionInfo> regions = admin.getRegions(tableName);
  if (regions == null || regions.isEmpty()) {
    getLogger().info("Table " + tableName + " doesn't have regions to move");
    return;
  }
  RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(regions.toArray(new RegionInfo[0]));
  getLogger().debug("Move random region {}", region.getRegionNameAsString());
  // Use facility over in MoveRegionsOfTableAction...
  MoveRegionsOfTableAction.moveRegion(admin, MoveRegionsOfTableAction.getServers(admin), region,
    getLogger());
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
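
MoveRegionsOfTableAction.moveRegion is referenced above but not shown here. A hedged sketch of what such a helper could look like, built on the public Admin.move API; the destination choice and the class name are assumptions, not the actual HBase implementation:

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.slf4j.Logger;

public final class MoveRegionSketch {
  // Hypothetical stand-in for MoveRegionsOfTableAction.moveRegion; the real
  // helper may pick destinations and handle failures differently.
  static void moveRegion(Admin admin, ServerName[] servers, RegionInfo region, Logger logger)
      throws IOException {
    ServerName destination = servers[ThreadLocalRandom.current().nextInt(servers.length)];
    logger.debug("Moving {} to {}", region.getRegionNameAsString(), destination);
    // Admin.move asks the master to reassign the region to the given server.
    admin.move(region.getEncodedNameAsBytes(), destination);
  }
}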
The class CompactMobAction, method perform:
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  boolean major = RandomUtils.nextInt(0, 100) < majorRatio;
  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  getLogger().info("Performing action: Compact mob of table " + tableName + ", major=" + major);
  try {
    if (major) {
      admin.majorCompact(tableName, CompactType.MOB);
    } else {
      admin.compact(tableName, CompactType.MOB);
    }
  } catch (Exception ex) {
    getLogger().warn("Mob Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
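
The compact calls above only request work; they return before the compaction actually finishes. If a caller needs to wait for the MOB compaction to complete, Admin exposes a state query (org.apache.hadoop.hbase.client.CompactionState). A minimal polling sketch that reuses the admin and tableName variables from the snippet above; the 1-second interval is an arbitrary choice:

// Sketch: wait until the requested MOB compaction has finished.
while (admin.getCompactionState(tableName, CompactType.MOB) != CompactionState.NONE) {
  Thread.sleep(1000);
}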
The class CompactTableAction, method perform:
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  boolean major = RandomUtils.nextInt(0, 100) < majorRatio;
  getLogger().info("Performing action: Compact table " + tableName + ", major=" + major);
  try {
    if (major) {
      admin.majorCompact(tableName);
    } else {
      admin.compact(tableName);
    }
  } catch (Exception ex) {
    getLogger().warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
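
Both compaction actions use the same coin flip: commons-lang3's RandomUtils.nextInt(0, 100) is uniform over [0, 100), so comparing against majorRatio chooses a major compaction roughly majorRatio percent of the time. A self-contained illustration of the pattern (the class name and majorRatio value are illustrative):

import org.apache.commons.lang3.RandomUtils;

public class MajorRatioDemo {
  public static void main(String[] args) {
    final int majorRatio = 30; // illustrative: ~30% of runs pick a major compaction
    final int trials = 10_000;
    int majors = 0;
    for (int i = 0; i < trials; i++) {
      // Same test the actions use: true roughly majorRatio% of the time.
      if (RandomUtils.nextInt(0, 100) < majorRatio) {
        majors++;
      }
    }
    System.out.printf("major chosen %d/%d times (~%.1f%%)%n", majors, trials,
      100.0 * majors / trials);
  }
}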
The class TestRollingRestart, method testBasicRollingRestart:
@Test
public void testBasicRollingRestart() throws Exception {
  // Start a cluster with 2 masters and 3 regionservers
  final int NUM_MASTERS = 2;
  final int NUM_RS = 3;
  final int NUM_REGIONS_TO_CREATE = 20;
  int expectedNumRS = 3;
  // Start the cluster
  log("Starting cluster");
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, splitWALCoordinatedByZK);
  TEST_UTIL = new HBaseTestingUtil(conf);
  StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS)
    .numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
  TEST_UTIL.startMiniCluster(option);
  SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  // Create a table with regions
  final TableName tableName =
    TableName.valueOf(name.getMethodName().replaceAll("[\\[|\\]]", "-"));
  byte[] family = Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
  int numRegions = -1;
  try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    numRegions = r.getStartKeys().length;
  }
  // catalogs
  numRegions += 1;
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Disabling table\n");
  TEST_UTIL.getAdmin().disableTable(tableName);
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  NavigableSet<String> regions = HBaseTestingUtil.getAllOnlineRegions(cluster);
  log("Verifying only catalog region is assigned\n");
  if (regions.size() != 1) {
    for (String oregion : regions) {
      log("Region still online: " + oregion);
    }
  }
  assertEquals(1, regions.size());
  log("Enabling table\n");
  TEST_UTIL.getAdmin().enableTable(tableName);
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtil.getAllOnlineRegions(cluster);
  assertRegionsAssigned(cluster, regions);
  assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
  // Add a new regionserver
  log("Adding a fourth RS");
  RegionServerThread restarted = cluster.startRegionServer();
  expectedNumRS++;
  restarted.waitForServerOnline();
  log("Additional RS is online");
  log("Waiting for no more RIT");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Verifying there are " + numRegions + " assigned on cluster");
  assertRegionsAssigned(cluster, regions);
  assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
  // Master Restarts
  List<MasterThread> masterThreads = cluster.getMasterThreads();
  MasterThread activeMaster = null;
  MasterThread backupMaster = null;
  assertEquals(2, masterThreads.size());
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster = masterThreads.get(0);
    backupMaster = masterThreads.get(1);
  } else {
    activeMaster = masterThreads.get(1);
    backupMaster = masterThreads.get(0);
  }
  // Bring down the backup master
  log("Stopping backup master\n\n");
  backupMaster.getMaster().stop("Stop of backup during rolling restart");
  cluster.hbaseCluster.waitOnMaster(backupMaster);
  // Bring down the primary master
  log("Stopping primary master\n\n");
  activeMaster.getMaster().stop("Stop of active during rolling restart");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  // Start primary master
  log("Restarting primary master\n\n");
  activeMaster = cluster.startMaster();
  cluster.waitForActiveAndReadyMaster();
  // Start backup master
  log("Restarting backup master\n\n");
  backupMaster = cluster.startMaster();
  assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
  // RegionServer Restarts
  // Bring them down, one at a time, waiting between each to complete
  List<RegionServerThread> regionServers = cluster.getLiveRegionServerThreads();
  int num = 1;
  int total = regionServers.size();
  for (RegionServerThread rst : regionServers) {
    ServerName serverName = rst.getRegionServer().getServerName();
    log("Stopping region server " + num + " of " + total + " [ " + serverName + "]");
    rst.getRegionServer().stop("Stopping RS during rolling restart");
    cluster.hbaseCluster.waitOnRegionServer(rst);
    log("Waiting for RS shutdown to be handled by master");
    waitForRSShutdownToStartAndFinish(activeMaster, serverName);
    log("RS shutdown done, waiting for no more RIT");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster");
    assertRegionsAssigned(cluster, regions);
    expectedNumRS--;
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
    log("Restarting region server " + num + " of " + total);
    restarted = cluster.startRegionServer();
    restarted.waitForServerOnline();
    expectedNumRS++;
    log("Region server " + num + " is back online");
    log("Waiting for no more RIT");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster");
    assertRegionsAssigned(cluster, regions);
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
    num++;
  }
  Thread.sleep(1000);
  assertRegionsAssigned(cluster, regions);
  // TODO: Bring random 3 of 4 RS down at the same time
  ht.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
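
Distilled from the test above, the basic HBaseTestingUtil lifecycle is: construct, start a mini cluster, exercise it, shut it down. A minimal sketch, assuming default cluster sizing; the class, table, and family names are placeholders:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster(); // defaults to a single master and regionserver
    try {
      TableName tn = TableName.valueOf("demo"); // placeholder table name
      Table table = util.createTable(tn, Bytes.toBytes("f")); // placeholder family
      // ... exercise the cluster via util.getConnection() / util.getAdmin() ...
      table.close();
    } finally {
      util.shutdownMiniCluster();
    }
  }
}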