Usage example of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in the Apache HBase project: class TestTransitRegionStateProcedure, method testRecoveryAndDoubleExecutionUnassignAndAssign.
/**
 * Drives an unassign procedure through master restart / double-execution fault injection,
 * verifies the region ends up CLOSED, then drives an assign the same way and verifies the
 * region was successfully reopened (its open sequence number advanced).
 */
@Test
public void testRecoveryAndDoubleExecutionUnassignAndAssign() throws Exception {
  HMaster activeMaster = UTIL.getMiniHBaseCluster().getMaster();
  MasterProcedureEnv procEnv = activeMaster.getMasterProcedureExecutor().getEnvironment();
  HRegion regionBefore = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0);
  RegionInfo hri = regionBefore.getRegionInfo();
  long seqNumBefore = regionBefore.getOpenSeqNum();
  // Run the unassign procedure under the recovery/double-execution harness.
  testRecoveryAndDoubleExcution(TransitRegionStateProcedure.unassign(procEnv, hri));
  AssignmentManager assignmentManager = activeMaster.getAssignmentManager();
  assertTrue(assignmentManager.getRegionStates().getRegionState(hri).isClosed());
  // Run the assign procedure under the same harness.
  testRecoveryAndDoubleExcution(TransitRegionStateProcedure.assign(procEnv, hri, null));
  HRegion regionAfter = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0);
  // A freshly opened region gets a strictly higher open sequence number.
  assertTrue(regionAfter.getOpenSeqNum() > seqNumBefore);
}
Usage example of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in the Apache HBase project: class TestTransitRegionStateProcedure, method testRecoveryAndDoubleExecutionReopen.
/**
 * Drives a reopen procedure through the recovery/double-execution harness and verifies that
 * the region stays on the same region server, advances its open sequence number, and keeps
 * its read/write request counters.
 */
@Test
public void testRecoveryAndDoubleExecutionReopen() throws Exception {
  MasterProcedureEnv procEnv =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  HRegionServer server = UTIL.getRSForFirstRegionInTable(tableName);
  HRegion before = server.getRegions(tableName).get(0);
  // Bump the counters so we can verify they survive the reopen.
  before.addReadRequestsCount(1);
  before.addWriteRequestsCount(2);
  long seqNumBefore = before.getOpenSeqNum();
  testRecoveryAndDoubleExcution(
    TransitRegionStateProcedure.reopen(procEnv, before.getRegionInfo()));
  // A reopen must leave the region on the same region server.
  HRegion after = server.getRegions(tableName).get(0);
  // confirm that the region is successfully opened
  assertTrue(after.getOpenSeqNum() > seqNumBefore);
  // we check the available by scan after table created,
  // so the readRequestsCount should be 2 here
  assertEquals(2, after.getReadRequestsCount());
  assertEquals(2, after.getWriteRequestsCount());
}
Usage example of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in the Apache HBase project: class TestTransitRegionStateProcedure, method testRecoveryAndDoubleExecutionMove.
/**
 * Drives a move procedure (target server chosen by the balancer, since null is passed)
 * through the recovery/double-execution harness and verifies the region was reopened by
 * checking that its open sequence number advanced.
 */
@Test
public void testRecoveryAndDoubleExecutionMove() throws Exception {
  MasterProcedureEnv procEnv =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  HRegion before = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0);
  long seqNumBefore = before.getOpenSeqNum();
  testRecoveryAndDoubleExcution(
    TransitRegionStateProcedure.move(procEnv, before.getRegionInfo(), null));
  HRegion after = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0);
  // confirm that the region is successfully opened
  assertTrue(after.getOpenSeqNum() > seqNumBefore);
}
Usage example of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in the Apache HBase project: class TestDirectStoreSplitsMerges, method testCommitDaughterRegionWithFiles.
/**
 * Splits the single store file of a freshly loaded region into two daughter half-references
 * and verifies that committing each daughter region returns the expected splits directory.
 */
@Test
public void testCommitDaughterRegionWithFiles() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  TEST_UTIL.createTable(table, FAMILY_NAME);
  // Write and flush so the parent region has at least one store file to split.
  putThreeRowsAndFlush(table);
  HRegion parent = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
  HRegionFileSystem parentFs = parent.getStores().get(0).getRegionFileSystem();
  byte[] splitKey = Bytes.toBytes("002");
  RegionInfo daughterA = RegionInfoBuilder.newBuilder(table)
    .setStartKey(parent.getRegionInfo().getStartKey())
    .setEndKey(splitKey)
    .setSplit(false)
    .setRegionId(parent.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
    .build();
  RegionInfo daughterB = RegionInfoBuilder.newBuilder(table)
    .setStartKey(splitKey)
    .setEndKey(parent.getRegionInfo().getEndKey())
    .setSplit(false)
    .setRegionId(parent.getRegionInfo().getRegionId())
    .build();
  Path expectedDirA = parentFs.getSplitsDir(daughterA);
  Path expectedDirB = parentFs.getSplitsDir(daughterB);
  HStoreFile storeFile = (HStoreFile) parent.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
  // Bottom half (top=false) goes to daughter A, top half (top=true) to daughter B.
  List<Path> filesA = new ArrayList<>();
  filesA.add(parentFs.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), storeFile,
    splitKey, false, parent.getSplitPolicy()));
  List<Path> filesB = new ArrayList<>();
  filesB.add(parentFs.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), storeFile,
    splitKey, true, parent.getSplitPolicy()));
  MasterProcedureEnv env =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  assertEquals(expectedDirA, parentFs.commitDaughterRegion(daughterA, filesA, env));
  assertEquals(expectedDirB, parentFs.commitDaughterRegion(daughterB, filesB, env));
}
Usage example of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in the Apache HBase project: class TestDirectStoreSplitsMerges, method testCommitMergedRegion.
/**
 * Splits a table, writes into both daughter regions, then merges one store file from each
 * region into a newly created merged-region file system and commits the merge.
 */
@Test
public void testCommitMergedRegion() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  TEST_UTIL.createTable(table, FAMILY_NAME);
  // Split first so there are two regions to merge back together.
  TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002"));
  waitForSplitProcComplete(1000, 10);
  // Flush data so each daughter region has a store file of its own.
  putThreeRowsAndFlush(table);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table);
  HRegion regionA = regions.get(0);
  HRegion regionB = regions.get(1);
  HRegionFileSystem regionAFs = regionA.getRegionFileSystem();
  RegionInfo mergedInfo = RegionInfoBuilder.newBuilder(table)
    .setStartKey(regionA.getRegionInfo().getStartKey())
    .setEndKey(regionB.getRegionInfo().getEndKey())
    .setSplit(false)
    .setRegionId(regionA.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
    .build();
  HRegionFileSystem mergedFs = HRegionFileSystem.createRegionOnFileSystem(
    TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionAFs.getFileSystem(),
    regionAFs.getTableDir(), mergedInfo);
  // Pull one store file from the first region into the merged region.
  HStoreFile fileA = (HStoreFile) regionA.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
  mergeFileFromRegion(mergedFs, regionA, fileA);
  // Then one from the second region; only this one is passed to the commit below,
  // matching the original test's behavior.
  HStoreFile fileB = (HStoreFile) regionB.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
  List<Path> mergedFiles = new ArrayList<>();
  mergedFiles.add(mergeFileFromRegion(mergedFs, regionB, fileB));
  MasterProcedureEnv env =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  mergedFs.commitMergedRegion(mergedFiles, env);
}
Aggregations