Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
In class TestMergeTableRegionsProcedure, method testRollbackAndDoubleExecution:
@Test
public void testRollbackAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  List<RegionInfo> tableRegions = createTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  RegionInfo[] regionsToMerge = new RegionInfo[2];
  regionsToMerge[0] = tableRegions.get(0);
  regionsToMerge[1] = tableRegions.get(1);
  long procId = procExec.submitProcedure(
    new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
  // Failing before MERGE_TABLE_REGIONS_UPDATE_META should trigger the rollback.
  // NOTE: the 8 (the number of the MERGE_TABLE_REGIONS_UPDATE_META step) is
  // hardcoded, so revisit this test whenever a new step is added.
  int lastStep = 8;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep, true);
  assertEquals(initialRegionCount, UTIL.getAdmin().getRegions(tableName).size());
  UTIL.waitUntilAllRegionsAssigned(tableName);
  List<HRegion> regions = UTIL.getMiniHBaseCluster().getRegions(tableName);
  assertEquals(initialRegionCount, regions.size());
}
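The createTable helper invoked above is not part of this excerpt. A minimal sketch of what it plausibly does, creating a pre-split table and returning its regions, is shown below; the FAMILY constant and the split-key format are assumptions for illustration, not the actual implementation.

// Hypothetical helper (not in the excerpt): create a table pre-split into
// initialRegionCount regions and return its RegionInfo list.
private List<RegionInfo> createTable(final TableName tableName) throws Exception {
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
    .build();
  byte[][] splitRows = new byte[initialRegionCount - 1][];
  for (int i = 0; i < splitRows.length; i++) {
    splitRows[i] = Bytes.toBytes(String.format("%04d", i));
  }
  UTIL.getAdmin().createTable(desc, splitRows);
  UTIL.waitUntilAllRegionsAssigned(tableName);
  return UTIL.getAdmin().getRegions(tableName);
}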
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
In class TestMergeTableRegionsProcedure, method testMergeRegionsConcurrently:
/**
 * This tests two concurrent region merges.
 */
@Test
public void testMergeRegionsConcurrently() throws Exception {
  final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  List<RegionInfo> tableRegions = createTable(tableName);
  RegionInfo[] regionsToMerge1 = new RegionInfo[2];
  RegionInfo[] regionsToMerge2 = new RegionInfo[2];
  regionsToMerge1[0] = tableRegions.get(0);
  regionsToMerge1[1] = tableRegions.get(1);
  regionsToMerge2[0] = tableRegions.get(2);
  regionsToMerge2[1] = tableRegions.get(3);
  // Collect AM metrics before the test.
  collectAssignmentManagerMetrics();
  long procId1 = procExec.submitProcedure(
    new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge1, true));
  long procId2 = procExec.submitProcedure(
    new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge2, true));
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
  assertRegionCount(tableName, initialRegionCount - 2);
  assertEquals(mergeSubmittedCount + 2, mergeProcMetrics.getSubmittedCounter().getCount());
  assertEquals(mergeFailedCount, mergeProcMetrics.getFailedCounter().getCount());
  assertEquals(assignSubmittedCount + 2, assignProcMetrics.getSubmittedCounter().getCount());
  assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount());
  assertEquals(unassignSubmittedCount + 4, unassignProcMetrics.getSubmittedCounter().getCount());
  assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount());
}
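The collectAssignmentManagerMetrics() call is not shown either. Judging from the counters compared in the assertions above, it most likely just snapshots the submitted/failed counter values before the merges run; a sketch under that assumption:

// Hypothetical helper: record the pre-test values of the procedure metrics so the
// assertions above can compare deltas rather than absolute counts.
private void collectAssignmentManagerMetrics() {
  mergeSubmittedCount = mergeProcMetrics.getSubmittedCounter().getCount();
  mergeFailedCount = mergeProcMetrics.getFailedCounter().getCount();
  assignSubmittedCount = assignProcMetrics.getSubmittedCounter().getCount();
  assignFailedCount = assignProcMetrics.getFailedCounter().getCount();
  unassignSubmittedCount = unassignProcMetrics.getSubmittedCounter().getCount();
  unassignFailedCount = unassignProcMetrics.getFailedCounter().getCount();
}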
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
In class TestMergeTableRegionsProcedure, method testRecoveryAndDoubleExecution:
@Test
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  List<RegionInfo> tableRegions = createTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillIfHasParent(procExec, false);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  RegionInfo[] regionsToMerge = new RegionInfo[2];
  regionsToMerge[0] = tableRegions.get(0);
  regionsToMerge[1] = tableRegions.get(1);
  long procId = procExec.submitProcedure(
    new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
  // Restart the executor and execute each step twice.
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  assertRegionCount(tableName, initialRegionCount - 1);
}
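getMasterProcedureExecutor() and assertRegionCount(...) are also helpers outside this excerpt. A plausible sketch, assuming the standard HBaseTestingUtility handle UTIL used throughout these tests (the real helpers may differ, e.g. assertRegionCount may return the region list):

// Hypothetical helpers (not in the excerpt).
private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
  return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
}

private void assertRegionCount(final TableName tableName, final int expected) throws Exception {
  UTIL.waitUntilAllRegionsAssigned(tableName);
  assertEquals(expected, UTIL.getAdmin().getRegions(tableName).size());
}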
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
In class TestSplitTableRegionProcedure, method testSplitWithoutPONR:
@Test
public void testSplitWithoutPONR() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, columnFamilyName1, columnFamilyName2);
  insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes("" + splitRowNum);
  assertTrue("not able to find a splittable region", regions != null);
  assertTrue("not able to find a splittable region", regions.length == 1);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  // Split a region of the table.
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Execute up to step 7 of the split procedure.
  // NOTE: the step number 7 (relative to SPLIT_TABLE_REGION_UPDATE_META) is hardcoded,
  // so revisit this test whenever the procedure steps change.
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, 7, false);
  // Unset the kill toggle so the procedure executor can run normally again.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
  MasterProcedureTestingUtility.restartMasterProcedureExecutor(procExec);
  ProcedureTestingUtility.waitProcedure(procExec, procId);
  // Even though the split was killed partway through, it should still complete correctly.
  verify(tableName, splitRowNum);
}
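The verify(tableName, splitRowNum) helper is not included. At a minimum it needs to confirm that the parent was replaced by two daughter regions and that no rows were lost; a sketch under that assumption (the real helper presumably also checks that each daughter serves only the rows on its side of splitRowNum):

// Hypothetical verify helper: the split should leave two daughter regions that
// together still contain all rowCount rows inserted before the split.
private void verify(final TableName tableName, final int splitRowNum) throws Exception {
  List<HRegion> daughters = UTIL.getMiniHBaseCluster().getRegions(tableName);
  assertEquals(2, daughters.size());
  assertEquals(rowCount, UTIL.countRows(tableName));
}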
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
In class TestSplitTableRegionProcedure, method testSplitTableRegionNoStoreFile:
@Test
public void testSplitTableRegionNoStoreFile() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, columnFamilyName1, columnFamilyName2);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes("" + splitRowNum);
  assertTrue("not able to find a splittable region", regions != null);
  assertTrue("not able to find a splittable region", regions.length == 1);
  // Collect AM metrics before the test.
  collectAssignmentManagerMetrics();
  // Split a region of the table.
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Wait for completion.
  ProcedureTestingUtility.waitProcedure(procExec, procId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  assertTrue(UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2);
  assertTrue(UTIL.countRows(tableName) == 0);
  assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount());
  assertEquals(splitFailedCount, splitProcMetrics.getFailedCounter().getCount());
}
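Unlike testSplitWithoutPONR, this test never calls insertData, which is why the split succeeds with zero rows and no store files. For reference, a sketch of what an insertData helper matching the call in testSplitWithoutPONR might look like; the qualifier name and value encoding are assumptions for illustration only:

// Hypothetical insertData helper: write rowCount rows starting at startRowNum into
// each given column family so the parent region has rows to split.
private static void insertData(final HBaseTestingUtility util, final TableName tableName,
    final int rowCount, final int startRowNum, final String... columnFamilies) throws IOException {
  try (Table table = util.getConnection().getTable(tableName)) {
    for (int i = 0; i < rowCount; i++) {
      Put put = new Put(Bytes.toBytes("" + (startRowNum + i)));
      for (String cf : columnFamilies) {
        put.addColumn(Bytes.toBytes(cf), Bytes.toBytes("q"), Bytes.toBytes(i));
      }
      table.put(put);
    }
  }
}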