Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From the class TestMergeTableRegionsProcedure, method testMergeWithoutPONR.
@Test
public void testMergeWithoutPONR() throws Exception {
  final TableName tableName = TableName.valueOf("testMergeWithoutPONR");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  List<RegionInfo> tableRegions = createTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  RegionInfo[] regionsToMerge = new RegionInfo[2];
  regionsToMerge[0] = tableRegions.get(0);
  regionsToMerge[1] = tableRegions.get(1);
  long procId = procExec.submitProcedure(
    new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
  // Execute until step 9 of the merge procedure
  // NOTE: step 9 is after step MERGE_TABLE_REGIONS_UPDATE_META
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, 9, false);
  // Unset the kill toggle and let the executor run to completion
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
  MasterProcedureTestingUtility.restartMasterProcedureExecutor(procExec);
  ProcedureTestingUtility.waitProcedure(procExec, procId);
  assertRegionCount(tableName, initialRegionCount - 1);
}
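Stripped of the kill/recovery scaffolding, the core pattern this test exercises is submit-and-wait against the master's procedure executor. A minimal sketch, assuming the same harness (getMasterProcedureExecutor and a pair of adjacent regions already collected in regionsToMerge):

ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
// Submit the merge and block until the procedure finishes, then assert success.
long procId = procExec.submitProcedure(
  new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
ProcedureTestingUtility.waitProcedure(procExec, procId);
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);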
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From the class MockMasterServices, method startProcedureExecutor.
private void startProcedureExecutor(final RSProcedureDispatcher remoteDispatcher) throws IOException {
  final Configuration conf = getConfiguration();
  this.procedureStore = new NoopProcedureStore();
  this.procedureStore.registerListener(new ProcedureStoreListener() {
    @Override
    public void abortProcess() {
      abort("The Procedure Store lost the lease", null);
    }
  });
  this.procedureEnv = new MasterProcedureEnv(this,
    remoteDispatcher != null ? remoteDispatcher : new RSProcedureDispatcher(this));
  this.procedureExecutor = new ProcedureExecutor<>(conf, procedureEnv, procedureStore,
    procedureEnv.getProcedureScheduler());
  final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
    Math.max(Runtime.getRuntime().availableProcessors(),
      MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
  final boolean abortOnCorruption = conf.getBoolean(
    MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
    MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
  this.procedureStore.start(numThreads);
  ProcedureTestingUtility.initAndStartWorkers(procedureExecutor, numThreads, abortOnCorruption);
  this.procedureEnv.getRemoteDispatcher().start();
}
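Both knobs read here can also be set explicitly before the executor is wired up. A minimal sketch, assuming a fresh Configuration; the constant names are the ones referenced above:

Configuration conf = HBaseConfiguration.create();
// Fix the worker-thread count instead of deriving it from available processors.
conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 4);
// Abort the process if a corrupted procedure is found when the store is loaded.
conf.setBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, true);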
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From the class TestMergesSplitsAddToTracker, method testCommitMergedRegion.
@Test
public void testCommitMergedRegion() throws Exception {
  TableName table = createTable(null);
  // splitting the table first
  TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002"));
  // Add data and flush to create files in the two different regions
  putThreeRowsAndFlush(table);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table);
  HRegion first = regions.get(0);
  HRegion second = regions.get(1);
  HRegionFileSystem regionFS = first.getRegionFileSystem();
  RegionInfo mergeResult = RegionInfoBuilder.newBuilder(table)
    .setStartKey(first.getRegionInfo().getStartKey())
    .setEndKey(second.getRegionInfo().getEndKey())
    .setSplit(false)
    .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
    .build();
  HRegionFileSystem mergeFS = HRegionFileSystem.createRegionOnFileSystem(
    TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(),
    regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult);
  List<Path> mergedFiles = new ArrayList<>();
  // merge file from first region
  mergedFiles.add(mergeFileFromRegion(first, mergeFS));
  // merge file from second region
  mergedFiles.add(mergeFileFromRegion(second, mergeFS));
  MasterProcedureEnv env =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  mergeFS.commitMergedRegion(mergedFiles, env);
  // validate
  FileSystem fs = first.getRegionFileSystem().getFileSystem();
  Path finalMergeDir = new Path(first.getRegionFileSystem().getTableDir(),
    mergeResult.getEncodedName());
  verifyFilesAreTracked(finalMergeDir, fs);
}
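The env handle is the recurring hinge of every example on this page: commitMergedRegion (and commitDaughterRegion below) take it so the file tracker can record the new files. On a mini-cluster it is always fetched the same way; a condensed restatement, assuming a running HBaseTestingUtility named TEST_UTIL as in the test above:

// Reach the MasterProcedureEnv through the active master's procedure executor.
MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster()
    .getMaster()
    .getMasterProcedureExecutor()
    .getEnvironment();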
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From the class TestMergesSplitsAddToTracker, method testCommitDaughterRegion.
@Test
public void testCommitDaughterRegion() throws Exception {
  TableName table = createTable(null);
  // first put some data in order to have a store file created
  putThreeRowsAndFlush(table);
  HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
  HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
  RegionInfo daughterA = RegionInfoBuilder.newBuilder(table)
    .setStartKey(region.getRegionInfo().getStartKey())
    .setEndKey(Bytes.toBytes("002"))
    .setSplit(false)
    .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
    .build();
  RegionInfo daughterB = RegionInfoBuilder.newBuilder(table)
    .setStartKey(Bytes.toBytes("002"))
    .setEndKey(region.getRegionInfo().getEndKey())
    .setSplit(false)
    .setRegionId(region.getRegionInfo().getRegionId())
    .build();
  HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
  List<Path> splitFilesA = new ArrayList<>();
  splitFilesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
    Bytes.toBytes("002"), false, region.getSplitPolicy()));
  List<Path> splitFilesB = new ArrayList<>();
  splitFilesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file,
    Bytes.toBytes("002"), true, region.getSplitPolicy()));
  MasterProcedureEnv env =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  Path resultA = regionFS.commitDaughterRegion(daughterA, splitFilesA, env);
  Path resultB = regionFS.commitDaughterRegion(daughterB, splitFilesB, env);
  FileSystem fs = regionFS.getFileSystem();
  verifyFilesAreTracked(resultA, fs);
  verifyFilesAreTracked(resultB, fs);
}
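Note that the two splitStoreFile calls differ only in their boolean argument: false for daughterA (keys below the split point "002") and true for daughterB (keys at or above it). Judging from the daughter key ranges, the flag selects which half of the parent store file the resulting reference file covers; a condensed restatement of the pair of calls above, with that inference spelled out in comments:

// Reference covering the bottom half of the parent file (below "002") for daughterA.
Path refBottom = regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
    Bytes.toBytes("002"), false, region.getSplitPolicy());
// Reference covering the top half (at or above "002") for daughterB.
Path refTop = regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file,
    Bytes.toBytes("002"), true, region.getSplitPolicy());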
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From the class TestSplitTableRegionProcedure, method testRecoveryAndDoubleExecution.
@Test
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null,
    columnFamilyName1, columnFamilyName2);
  insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes("" + splitRowNum);
  assertTrue("not able to find a splittable region", regions != null);
  assertTrue("not able to find a splittable region", regions.length == 1);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillIfHasParent(procExec, false);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  // collect AM metrics before test
  collectAssignmentManagerMetrics();
  // Split a region of the table
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Restart the executor and execute each step twice
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  verify(tableName, splitRowNum);
  assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount());
  assertEquals(splitFailedCount, splitProcMetrics.getFailedCounter().getCount());
}
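Both recovery tests on this page share the same scaffold. A condensed restatement, where proc is a hypothetical placeholder for any procedure under test (e.g. the split or merge procedures used above):

// 1. Quiesce, then arm the kill point that fires before each procedure-store update.
ProcedureTestingUtility.waitNoProcedureRunning(procExec);
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
// 2. Submit the procedure under test (proc is a placeholder, not a real variable here).
long procId = procExec.submitProcedure(proc);
// 3. Restart the executor after every induced kill and re-execute each step, then assert success.
MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);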