Usage of org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure in the Apache HBase project.
From class TestSplitRegionWhileRSCrash, method test:
/**
 * Verifies that a region split survives a region-server crash: submits a
 * SplitTableRegionProcedure, kills the RS while regions are in transition,
 * starts a replacement RS, and asserts all 10 rows are still readable.
 */
@Test
public void test() throws Exception {
  MasterProcedureEnv env =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
  final ProcedureExecutor<MasterProcedureEnv> executor =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
  List<RegionInfo> regionInfos = ADMIN.getRegions(TABLE_NAME);
  // Since a flush request will be sent while initializing SplitTableRegionProcedure,
  // create the SplitTableRegionProcedure first, before putting data.
  SplitTableRegionProcedure splitProcedure =
    new SplitTableRegionProcedure(env, regionInfos.get(0), Bytes.toBytes("row5"));
  // Write some rows to the table.
  LOG.info("Begin to put data");
  for (int i = 0; i < 10; i++) {
    byte[] row = Bytes.toBytes("row" + i);
    Put put = new Put(row);
    put.addColumn(CF, CF, CF);
    TABLE.put(put);
  }
  executor.submitProcedure(splitProcedure);
  LOG.info("SplitProcedure submitted");
  // Wait until the split has spawned a region transition for our table.
  UTIL.waitFor(30000,
    () -> executor.getProcedures().stream()
      .filter(p -> p instanceof TransitRegionStateProcedure)
      .map(p -> (TransitRegionStateProcedure) p)
      .anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
  // Kill the RS mid-transition, then bring up a replacement and let the
  // cluster settle.
  UTIL.getMiniHBaseCluster()
    .killRegionServer(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName());
  UTIL.getMiniHBaseCluster().startRegionServer();
  UTIL.waitUntilNoRegionsInTransition();
  // FIX: the original leaked the ResultScanner; close it via try-with-resources.
  int count = 0;
  try (ResultScanner results = TABLE.getScanner(new Scan())) {
    while (results.next() != null) {
      count++;
    }
  }
  Assert.assertEquals("There should be 10 rows!", 10, count);
}
Usage of org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure in the Apache HBase project.
From class TestDirectStoreSplitsMerges, method waitForSplitProcComplete:
/**
 * Waits for an in-flight SplitTableRegionProcedure (if any is present) to
 * finish, polling at most {@code attempts} times with {@code waitTime} ms
 * between polls, then asserts the procedure completed successfully.
 *
 * @param attempts maximum number of poll iterations
 * @param waitTime milliseconds to block per iteration
 */
private void waitForSplitProcComplete(int attempts, int waitTime) throws Exception {
  List<Procedure<?>> procedures = TEST_UTIL.getHBaseCluster().getMaster().getProcedures();
  // FIX: the original called findFirst().get() whenever ANY procedure existed,
  // throwing NoSuchElementException if none of them was a split procedure.
  // Also fixes the raw 'Procedure' type.
  Procedure<?> splitProc = procedures.stream()
    .filter(p -> p instanceof SplitTableRegionProcedure)
    .findFirst().orElse(null);
  if (splitProc != null) {
    int count = 0;
    while ((splitProc.isWaiting() || splitProc.isRunnable()) && count < attempts) {
      synchronized (splitProc) {
        // Nothing notifies this monitor; wait(waitTime) acts as a bounded sleep.
        splitProc.wait(waitTime);
      }
      count++;
    }
    assertTrue(splitProc.isSuccess());
  }
}
Usage of org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure in the Apache HBase project.
From class TestSplitOrMergeStatus, method testSplitRegionReplicaRitRecovery:
/**
 * Verifies that after splitting a region of a table with region replicas,
 * then disabling and deleting the table, killing the RS that carried the
 * replica leaves no regions stuck in transition.
 */
@Test
public void testSplitRegionReplicaRitRecovery() throws Exception {
  final int startRowNum = 11;
  final int rowCount = 60;
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  // Create a table with two region replicas and wait until everything is assigned.
  TEST_UTIL.getAdmin().createTable(
    TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
      .setRegionReplication(2)
      .build());
  TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
  ServerName replicaServer =
    RegionReplicaTestHelper.getRSCarryingReplica(TEST_UTIL, tableName, 1).get();
  List<RegionInfo> tableRegions = TEST_UTIL.getAdmin().getRegions(tableName);
  insertData(tableName, startRowNum, rowCount);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes("" + splitRowNum);
  // Split the first region at the midpoint key and wait for it to finish.
  long splitProcId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), tableRegions.get(0), splitKey));
  ProcedureTestingUtility.waitProcedure(procExec, splitProcId);
  // Disable the table and wait for completion.
  long disableProcId = procExec.submitProcedure(
    new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
  ProcedureTestingUtility.waitProcedure(procExec, disableProcId);
  // Delete the table and wait for completion.
  long deleteProcId = procExec.submitProcedure(
    new DeleteTableProcedure(procExec.getEnvironment(), tableName));
  ProcedureTestingUtility.waitProcedure(procExec, deleteProcId);
  // Kill the server that hosted the replica and give recovery time to run.
  AssignmentTestingUtil.killRs(TEST_UTIL, replicaServer);
  Threads.sleepWithoutInterrupt(5000);
  boolean hasRegionsInTransition = TEST_UTIL.getMiniHBaseCluster().getMaster()
    .getAssignmentManager().getRegionStates().hasRegionsInTransition();
  assertEquals(false, hasRegionsInTransition);
}
Aggregations