Example usage of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in the Apache HBase project:
class TestSplitWALProcedure, method testHandleDeadWorker.
@Test
public void testHandleDeadWorker() throws Exception {
  // Create a table and load it repeatedly so the first region server accumulates WAL entries.
  Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
  for (int round = 0; round < 10; round++) {
    TEST_UTIL.loadTable(table, FAMILY);
  }
  HRegionServer walOwner = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
  List<FileStatus> walsToSplit = splitWALManager.getWALsToSplit(walOwner.getServerName(), false);
  Assert.assertEquals(1, walsToSplit.size());
  // Kill the WAL owner; the master should react by scheduling a SplitWALProcedure.
  TEST_UTIL.getHBaseCluster().killRegionServer(walOwner.getServerName());
  TEST_UTIL.waitFor(30000,
    () -> master.getProcedures().stream().anyMatch(SplitWALProcedure.class::isInstance));
  Procedure splitWALProcedure = master.getProcedures().stream()
    .filter(SplitWALProcedure.class::isInstance).findAny().get();
  Assert.assertNotNull(splitWALProcedure);
  // Wait until the procedure has acquired a worker, then kill that worker as well.
  // The procedure must recover by acquiring a new worker and still complete successfully.
  TEST_UTIL.waitFor(5000, () -> ((SplitWALProcedure) splitWALProcedure).getWorker() != null);
  TEST_UTIL.getHBaseCluster()
    .killRegionServer(((SplitWALProcedure) splitWALProcedure).getWorker());
  ProcedureTestingUtility.waitProcedure(procExec, splitWALProcedure.getProcId());
  Assert.assertTrue(splitWALProcedure.isSuccess());
  ProcedureTestingUtility.waitAllProcedures(procExec);
}
Example usage of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in the Apache HBase project:
class MasterProcedureTestingUtility, method restartMasterProcedureExecutor.
/**
 * Restarts the given master procedure executor, simulating a master restart: assignment
 * state is torn down before the stop, regions-in-transition are re-attached before the
 * workers start, and the cluster is re-joined once services are back up.
 */
public static void restartMasterProcedureExecutor(ProcedureExecutor<MasterProcedureEnv> procExec)
    throws Exception {
  final MasterProcedureEnv env = procExec.getEnvironment();
  final HMaster master = (HMaster) env.getMasterServices();
  ProcedureTestingUtility.restart(procExec, true, true,
    // stop services
    () -> {
      AssignmentManager am = env.getAssignmentManager();
      // try to simulate a master restart by removing the ServerManager states about seqIDs
      for (RegionState regionState : am.getRegionStates().getRegionStates()) {
        env.getMasterServices().getServerManager().removeRegion(regionState.getRegion());
      }
      am.stop();
      master.setInitialized(false);
      return null;
    },
    // setup RIT before starting workers
    () -> {
      AssignmentManager am = env.getAssignmentManager();
      am.start();
      // just follow the same way with HMaster.finishActiveMasterInitialization. See the
      // comments there
      am.setupRIT(procExec.getActiveProceduresNoCopy().stream()
        .filter(p -> !p.isSuccess())
        .filter(p -> p instanceof TransitRegionStateProcedure)
        .map(p -> (TransitRegionStateProcedure) p)
        .collect(Collectors.toList()));
      return null;
    },
    // restart services
    () -> {
      AssignmentManager am = env.getAssignmentManager();
      try {
        am.joinCluster();
        am.wakeMetaLoadedEvent();
        master.setInitialized(true);
      } catch (Exception e) {
        LOG.warn("Failed to load meta", e);
      }
      return null;
    });
}
Example usage of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in the Apache HBase project:
class TestSplitWALManager, method testCreateSplitWALProcedures.
@Test
public void testCreateSplitWALProcedures() throws Exception {
  TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
  // load table
  TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY);
  ProcedureExecutor<MasterProcedureEnv> masterPE = master.getMasterProcedureExecutor();
  ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
  Path metaWALDir = new Path(TEST_UTIL.getDefaultRootDirPath(),
    AbstractFSWALProvider.getWALDirectoryName(metaServer.toString()));
  // Test splitting meta wal
  FileStatus[] wals =
    TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.META_FILTER);
  Assert.assertEquals(1, wals.length);
  splitWALAndAssertDeleted(masterPE, wals[0], metaServer);
  // Test splitting a non-meta wal from the same directory
  wals = TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.NON_META_FILTER);
  Assert.assertEquals(1, wals.length);
  splitWALAndAssertDeleted(masterPE, wals[0], metaServer);
}

/**
 * Creates exactly one SplitWALProcedure for the given WAL, runs it to completion on the
 * supplied executor, and verifies the WAL file has been removed from the filesystem.
 */
private void splitWALAndAssertDeleted(ProcedureExecutor<MasterProcedureEnv> masterPE,
    FileStatus wal, ServerName server) throws Exception {
  List<Procedure> procedures =
    splitWALManager.createSplitWALProcedures(Lists.newArrayList(wal), server);
  Assert.assertEquals(1, procedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
  Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(wal.getPath()));
}
Example usage of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in the Apache HBase project:
class TestSplitWALManager, method splitLogsTestHelper.
/**
 * Loads a table, then splits the WALs of a non-meta region server and the meta WAL of the
 * meta-hosting server via SplitWALManager, verifying the split procedures finish and that
 * finished WALs are moved to the archive directory.
 */
private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception {
  HMaster hmaster = testUtil.getHBaseCluster().getMaster();
  SplitWALManager splitWALManager = hmaster.getSplitWALManager();
  // Parameterized logging avoids eager string concatenation.
  LOG.info("The Master FS is pointing to: {}",
    hmaster.getMasterFileSystem().getFileSystem().getUri());
  LOG.info("The WAL FS is pointing to: {}",
    hmaster.getMasterFileSystem().getWALFileSystem().getUri());
  testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE);
  // load table
  testUtil.loadTable(testUtil.getConnection().getTable(TABLE_NAME), FAMILY);
  ProcedureExecutor<MasterProcedureEnv> masterPE = hmaster.getMasterProcedureExecutor();
  ServerName metaServer = testUtil.getHBaseCluster().getServerHoldingMeta();
  // BUGFIX: use equals(), not reference equality. ServerName instances obtained from
  // different accessors are not guaranteed to be the same object, so "rs != metaServer"
  // could accidentally select the meta-hosting server.
  ServerName testServer = testUtil.getHBaseCluster().getRegionServerThreads().stream()
    .map(t -> t.getRegionServer().getServerName())
    .filter(sn -> !sn.equals(metaServer))
    .findAny().get();
  List<Procedure> procedures = splitWALManager.splitWALs(testServer, false);
  Assert.assertEquals(1, procedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
  Assert.assertEquals(0, splitWALManager.getWALsToSplit(testServer, false).size());
  // Validate the old WAL file archive dir
  Path walRootDir = hmaster.getMasterFileSystem().getWALRootDir();
  Path walArchivePath = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  FileSystem walFS = hmaster.getMasterFileSystem().getWALFileSystem();
  int archiveFileCount = walFS.listStatus(walArchivePath).length;
  // Now split only the meta WAL of the meta-hosting server.
  procedures = splitWALManager.splitWALs(metaServer, true);
  Assert.assertEquals(1, procedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
  Assert.assertEquals(0, splitWALManager.getWALsToSplit(metaServer, true).size());
  Assert.assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size());
  // There should be archiveFileCount + 1 WALs after SplitWALProcedure finish
  Assert.assertEquals("Splitted WAL files should be archived", archiveFileCount + 1,
    walFS.listStatus(walArchivePath).length);
}
Example usage of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in the Apache HBase project:
class TestRaceBetweenSCPAndDTP, method test.
@Test
public void test() throws Exception {
  RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo();
  AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
  ServerName sn = am.getRegionStates().getRegionState(region).getServerName();
  LOG.info("ServerName={}, region={}", sn, region);
  ARRIVE_GET_REGIONS_ON_TABLE = new CountDownLatch(1);
  RESUME_GET_REGIONS_ON_SERVER = new CountDownLatch(1);
  // Assign to local variable because this static gets set to null in above running thread and
  // so NPE.
  CountDownLatch cdl = ARRIVE_GET_REGIONS_ON_TABLE;
  UTIL.getAdmin().disableTableAsync(NAME);
  cdl.await();
  ProcedureExecutor<?> procExec =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
  UTIL.getMiniHBaseCluster().stopRegionServer(sn);
  long pid = Procedure.NO_PROC_ID;
  // BUGFIX: spin while NO SCP has been scheduled yet (pid == NO_PROC_ID). The previous
  // condition (pid != NO_PROC_ID) exited immediately when no SCP existed, leaving scppid
  // set to NO_PROC_ID so the waitFor below never tracked the real procedure.
  do {
    Threads.sleep(1);
    pid = getSCPPID(procExec);
  } while (pid == Procedure.NO_PROC_ID);
  final long scppid = pid;
  UTIL.waitFor(60000, () -> procExec.isFinished(scppid));
  RESUME_GET_REGIONS_ON_SERVER.countDown();
  // The DisableTableProcedure scheduled above must also be able to finish despite the race.
  long dtpProcId = procExec.getProcedures().stream()
    .filter(p -> p instanceof DisableTableProcedure)
    .map(p -> (DisableTableProcedure) p)
    .findAny().get().getProcId();
  UTIL.waitFor(60000, () -> procExec.isFinished(dtpProcId));
}
Aggregations