Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
The class TestLoadProcedureError, method testLoadError.
@Test
public void testLoadError() throws Exception {
  ProcedureExecutor<MasterProcedureEnv> procExec =
    UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
  ARRIVE = new CountDownLatch(1);
  long procId = procExec.submitProcedure(new TestProcedure());
  ARRIVE.await();
  FAIL_LOAD = true;
  // do not persist the store tracker
  UTIL.getMiniHBaseCluster().getMaster().getProcedureStore().stop(true);
  UTIL.getMiniHBaseCluster().getMaster().abort("for testing");
  waitNoMaster();
  // Restart twice; both restarts should fail because we throw an exception in the afterReplay
  // defined above. To reproduce the problem in HBASE-21490 reliably, wait until the previous
  // master is fully down before starting the new one; otherwise the new master may start too
  // early, call recoverLease on the proc wal files, and prevent persisting the store tracker
  // when shutting down.
  UTIL.getMiniHBaseCluster().startMaster();
  waitNoMaster();
  UTIL.getMiniHBaseCluster().startMaster();
  waitNoMaster();
  FAIL_LOAD = false;
  HMaster master = UTIL.getMiniHBaseCluster().startMaster().getMaster();
  UTIL.waitFor(30000, () -> master.isActiveMaster() && master.isInitialized());
  // assert the procedure is still there and not finished yet
  TestProcedure proc =
    (TestProcedure) master.getMasterProcedureExecutor().getProcedure(procId);
  assertFalse(proc.isFinished());
  FINISH_PROC = true;
  UTIL.waitFor(30000, () -> proc.isFinished());
}
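The waitNoMaster() helper, the TestProcedure class, and the ARRIVE/FAIL_LOAD/FINISH_PROC flags are defined elsewhere in TestLoadProcedureError and are not part of this excerpt. A minimal sketch of what the wait helper could look like, assuming the standard HBaseTestingUtility waitFor API and an arbitrary 30-second timeout:

private void waitNoMaster() {
  // Wait until no live master thread remains in the mini cluster (assumed helper, not
  // copied from the test class).
  UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getLiveMasterThreads().isEmpty());
}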
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
The class TestSplitWALManager, method testCreateSplitWALProcedures.
@Test
public void testCreateSplitWALProcedures() throws Exception {
  TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
  // load table
  TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY);
  ProcedureExecutor<MasterProcedureEnv> masterPE = master.getMasterProcedureExecutor();
  ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
  Path metaWALDir = new Path(TEST_UTIL.getDefaultRootDirPath(),
    AbstractFSWALProvider.getWALDirectoryName(metaServer.toString()));
  // Test splitting the meta WAL
  FileStatus[] wals =
    TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.META_FILTER);
  Assert.assertEquals(1, wals.length);
  List<Procedure> testProcedures =
    splitWALManager.createSplitWALProcedures(Lists.newArrayList(wals[0]), metaServer);
  Assert.assertEquals(1, testProcedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, testProcedures.get(0));
  Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(wals[0].getPath()));
  // Test splitting a non-meta WAL on the same server
  wals = TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.NON_META_FILTER);
  Assert.assertEquals(1, wals.length);
  testProcedures =
    splitWALManager.createSplitWALProcedures(Lists.newArrayList(wals[0]), metaServer);
  Assert.assertEquals(1, testProcedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, testProcedures.get(0));
  Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(wals[0].getPath()));
}
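Since createSplitWALProcedures accepts a list of WAL files, the same pattern extends to several files at once. A short sketch reusing the calls from the snippet above; the walsToSplit variable and the loop are illustrative assumptions, not code from the test:

// Illustrative only: build one split procedure per WAL file found under the server's
// WAL directory and run them synchronously through the master procedure executor.
FileStatus[] walsToSplit =
  TEST_UTIL.getTestFileSystem().listStatus(metaWALDir, MasterWalManager.NON_META_FILTER);
List<Procedure> procs =
  splitWALManager.createSplitWALProcedures(Lists.newArrayList(walsToSplit), metaServer);
for (Procedure proc : procs) {
  ProcedureTestingUtility.submitAndWait(masterPE, proc);
}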
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
The class TestSplitWALManager, method splitLogsTestHelper.
private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception {
  HMaster hmaster = testUtil.getHBaseCluster().getMaster();
  SplitWALManager splitWALManager = hmaster.getSplitWALManager();
  LOG.info("The Master FS is pointing to: "
    + hmaster.getMasterFileSystem().getFileSystem().getUri());
  LOG.info("The WAL FS is pointing to: "
    + hmaster.getMasterFileSystem().getWALFileSystem().getUri());
  testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE);
  // load table
  testUtil.loadTable(testUtil.getConnection().getTable(TABLE_NAME), FAMILY);
  ProcedureExecutor<MasterProcedureEnv> masterPE = hmaster.getMasterProcedureExecutor();
  ServerName metaServer = testUtil.getHBaseCluster().getServerHoldingMeta();
  ServerName testServer = testUtil.getHBaseCluster().getRegionServerThreads().stream()
    .map(rs -> rs.getRegionServer().getServerName())
    .filter(rs -> rs != metaServer)
    .findAny().get();
  // Split the WALs of a region server that does not host meta
  List<Procedure> procedures = splitWALManager.splitWALs(testServer, false);
  Assert.assertEquals(1, procedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
  Assert.assertEquals(0, splitWALManager.getWALsToSplit(testServer, false).size());
  // Validate the old WAL file archive dir
  Path walRootDir = hmaster.getMasterFileSystem().getWALRootDir();
  Path walArchivePath = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  FileSystem walFS = hmaster.getMasterFileSystem().getWALFileSystem();
  int archiveFileCount = walFS.listStatus(walArchivePath).length;
  // Split only the meta WAL of the server hosting meta
  procedures = splitWALManager.splitWALs(metaServer, true);
  Assert.assertEquals(1, procedures.size());
  ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
  Assert.assertEquals(0, splitWALManager.getWALsToSplit(metaServer, true).size());
  Assert.assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size());
  // There should be archiveFileCount + 1 WALs after the SplitWALProcedure finishes
  Assert.assertEquals("Split WAL files should be archived", archiveFileCount + 1,
    walFS.listStatus(walArchivePath).length);
}
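The helper is parameterized on the HBaseTestingUtil so it can be run against clusters whose WAL filesystem differs from the root filesystem, which is what the two LOG lines at the top record. A plausible sketch of how the test class might invoke it; the test method name is an assumption, not taken from this excerpt:

@Test
public void testSplitLogs() throws Exception {
  // Drive the helper against the default cluster, where root FS and WAL FS are the same.
  splitLogsTestHelper(TEST_UTIL);
}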
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
The class TestRegionSplit, method testSplitTableRegion.
@Test
public void testSplitTableRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  RegionInfo[] regions =
    MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName);
  insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes("" + splitRowNum);
  assertNotNull("Not able to find a splittable region", regions);
  assertEquals("Not able to find a splittable region", 1, regions.length);
  // Split the region of the table
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Wait for completion
  ProcedureTestingUtility.waitProcedure(procExec, procId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  assertEquals("Not able to split table", 2, UTIL.getHBaseCluster().getRegions(tableName).size());
  // disable table
  UTIL.getAdmin().disableTable(tableName);
  Thread.sleep(500);
  // stop master
  UTIL.getHBaseCluster().stopMaster(0);
  UTIL.getHBaseCluster().waitOnMaster(0);
  Thread.sleep(500);
  // restart master
  JVMClusterUtil.MasterThread t = UTIL.getHBaseCluster().startMaster();
  Thread.sleep(500);
  UTIL.invalidateConnection();
  // enable table
  UTIL.getAdmin().enableTable(tableName);
  Thread.sleep(500);
  List<HRegion> tableRegions = UTIL.getHBaseCluster().getRegions(tableName);
  assertEquals("Table region not correct.", 2, tableRegions.size());
  // Both daughter regions should be assigned to the same region server
  Map<RegionInfo, ServerName> regionInfoMap = UTIL.getHBaseCluster().getMaster()
    .getAssignmentManager().getRegionStates().getRegionAssignments();
  assertEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()),
    regionInfoMap.get(tableRegions.get(1).getRegionInfo()));
}
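The getMasterProcedureExecutor() and insertData(...) helpers live elsewhere in TestRegionSplit and are not shown here. The executor accessor presumably just delegates to the running master, along the lines of this sketch, an assumption modeled on the equivalent call chain in the TestLoadProcedureError snippet above:

private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
  // Fetch the procedure executor of the currently active master in the mini cluster
  // (assumed helper, not copied from the test class).
  return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
}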
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
The class TestRegionSplit, method testSplitStoreFiles.
@Test
public void testSplitStoreFiles() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  RegionInfo[] regions =
    MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName);
  // insert data and flush the memstore
  insertData(UTIL, tableName, rowCount, startRowNum, true, columnFamilyName);
  // assert the hfile count of the table
  int storeFilesCountSum = 0;
  for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) {
    storeFilesCountSum += region.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size();
  }
  assertEquals(1, storeFilesCountSum);
  // split at the start row
  byte[] splitKey = Bytes.toBytes("" + startRowNum);
  assertNotNull("Not able to find a splittable region", regions);
  assertEquals("Not able to find a splittable region", 1, regions.length);
  // Split the region of the table
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Wait for completion
  ProcedureTestingUtility.waitProcedure(procExec, procId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  assertEquals("Not able to split table", 2, UTIL.getHBaseCluster().getRegions(tableName).size());
  // assert the sum of the hfiles across all daughter regions
  int childStoreFilesSum = 0;
  for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) {
    childStoreFilesSum += region.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size();
  }
  assertEquals(1, childStoreFilesSum);
  List<HRegion> tableRegions = UTIL.getHBaseCluster().getRegions(tableName);
  assertEquals("Table region not correct.", 2, tableRegions.size());
  // Both daughter regions should be assigned to the same region server
  Map<RegionInfo, ServerName> regionInfoMap = UTIL.getHBaseCluster().getMaster()
    .getAssignmentManager().getRegionStates().getRegionAssignments();
  assertEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()),
    regionInfoMap.get(tableRegions.get(1).getRegionInfo()));
}
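The insertData helper used by both TestRegionSplit methods is not part of this excerpt; its boolean argument presumably controls whether the memstore is flushed after loading, as the comment above the call suggests. A rough sketch under those assumptions; the signature, column qualifier, and flush call are guesses, not the project's actual helper:

private static void insertData(HBaseTestingUtil util, TableName tableName, int rowCount,
    int startRow, boolean flush, String cf) throws IOException {
  try (Table table = util.getConnection().getTable(tableName)) {
    for (int i = 0; i < rowCount; i++) {
      // Row keys mirror the string-encoded row numbers used for the split key above.
      Put put = new Put(Bytes.toBytes("" + (startRow + i)));
      put.addColumn(Bytes.toBytes(cf), Bytes.toBytes("q"), Bytes.toBytes(i));
      table.put(put);
    }
  }
  if (flush) {
    // Flush so the inserted rows land in hfiles, as the store-file assertions above expect.
    util.flush(tableName);
  }
}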