Search in sources:

Example 41 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Source: method testLoadError of the class TestLoadProcedureError.

/**
 * Regression test for HBASE-21490: a failure while loading the procedure store on master
 * startup must not lose a previously submitted procedure.
 *
 * Flow: submit a TestProcedure and wait (via ARRIVE) until it is running, arrange for the
 * store tracker NOT to be persisted, abort the master, then restart it twice while
 * FAIL_LOAD is set so each store replay fails. A final restart with FAIL_LOAD cleared must
 * find the same procedure, still unfinished, and it must complete once FINISH_PROC is set.
 *
 * NOTE(review): ARRIVE, FAIL_LOAD and FINISH_PROC are static fields declared elsewhere in
 * this class — presumably consulted by TestProcedure and its afterReplay hook; confirm there.
 */
@Test
public void testLoadError() throws Exception {
    ProcedureExecutor<MasterProcedureEnv> procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
    ARRIVE = new CountDownLatch(1);
    long procId = procExec.submitProcedure(new TestProcedure());
    // Block until the submitted procedure has actually started executing.
    ARRIVE.await();
    FAIL_LOAD = true;
    // do not persist the store tracker
    UTIL.getMiniHBaseCluster().getMaster().getProcedureStore().stop(true);
    UTIL.getMiniHBaseCluster().getMaster().abort("for testing");
    waitNoMaster();
    // restart twice, and should fail twice, as we will throw an exception in the afterReplay above
    // in order to reproduce the problem in HBASE-21490 stably, here we will wait until a master is
    // fully done, before starting the new master, otherwise the new master may start too early and
    // call recoverLease on the proc wal files and cause we fail to persist the store tracker when
    // shutting down
    UTIL.getMiniHBaseCluster().startMaster();
    waitNoMaster();
    UTIL.getMiniHBaseCluster().startMaster();
    waitNoMaster();
    // Allow the next master to load the procedure store successfully.
    FAIL_LOAD = false;
    HMaster master = UTIL.getMiniHBaseCluster().startMaster().getMaster();
    UTIL.waitFor(30000, () -> master.isActiveMaster() && master.isInitialized());
    // assert the procedure is still there and not finished yet
    TestProcedure proc = (TestProcedure) master.getMasterProcedureExecutor().getProcedure(procId);
    assertFalse(proc.isFinished());
    // Release the procedure and wait for it to run to completion.
    FINISH_PROC = true;
    UTIL.waitFor(30000, () -> proc.isFinished());
}
Also used : MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) CountDownLatch(java.util.concurrent.CountDownLatch) Test(org.junit.Test)

Example 42 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Source: method testCreateSplitWALProcedures of the class TestSplitWALManager.

/**
 * Verifies that SplitWALManager creates exactly one SplitWALProcedure per WAL file, and
 * that running the procedure removes the source WAL — first for the meta WAL, then for a
 * regular (non-meta) WAL on the same server.
 */
@Test
public void testCreateSplitWALProcedures() throws Exception {
    TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
    // Write some data so the server's WALs are non-trivial.
    TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY);
    ProcedureExecutor<MasterProcedureEnv> procExecutor = master.getMasterProcedureExecutor();
    ServerName serverWithMeta = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
    Path walDir = new Path(TEST_UTIL.getDefaultRootDirPath(), AbstractFSWALProvider.getWALDirectoryName(serverWithMeta.toString()));
    // Case 1: split the meta WAL.
    FileStatus[] walFiles = TEST_UTIL.getTestFileSystem().listStatus(walDir, MasterWalManager.META_FILTER);
    Assert.assertEquals(1, walFiles.length);
    List<Procedure> splitProcs = splitWALManager.createSplitWALProcedures(Lists.newArrayList(walFiles[0]), serverWithMeta);
    Assert.assertEquals(1, splitProcs.size());
    ProcedureTestingUtility.submitAndWait(procExecutor, splitProcs.get(0));
    Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(walFiles[0].getPath()));
    // Case 2: split a regular WAL from the same directory.
    walFiles = TEST_UTIL.getTestFileSystem().listStatus(walDir, MasterWalManager.NON_META_FILTER);
    Assert.assertEquals(1, walFiles.length);
    splitProcs = splitWALManager.createSplitWALProcedures(Lists.newArrayList(walFiles[0]), serverWithMeta);
    Assert.assertEquals(1, splitProcs.size());
    ProcedureTestingUtility.submitAndWait(procExecutor, splitProcs.get(0));
    Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(walFiles[0].getPath()));
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ServerName(org.apache.hadoop.hbase.ServerName) StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Test(org.junit.Test)

Example 43 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Source: method splitLogsTestHelper of the class TestSplitWALManager.

/**
 * Shared driver for WAL-splitting tests: loads a table, splits the WALs of a non-meta
 * region server via SplitWALManager procedures, then splits the meta WAL of the server
 * hosting hbase:meta and verifies the finished WAL files land in the archive directory.
 *
 * @param testUtil the mini-cluster utility to run against
 * @throws Exception on any cluster or procedure failure
 */
private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception {
    HMaster hmaster = testUtil.getHBaseCluster().getMaster();
    SplitWALManager splitWALManager = hmaster.getSplitWALManager();
    LOG.info("The Master FS is pointing to: " + hmaster.getMasterFileSystem().getFileSystem().getUri());
    LOG.info("The WAL FS is pointing to: " + hmaster.getMasterFileSystem().getWALFileSystem().getUri());
    testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE);
    // load table
    testUtil.loadTable(testUtil.getConnection().getTable(TABLE_NAME), FAMILY);
    ProcedureExecutor<MasterProcedureEnv> masterPE = hmaster.getMasterProcedureExecutor();
    ServerName metaServer = testUtil.getHBaseCluster().getServerHoldingMeta();
    // BUGFIX: compare ServerNames with equals(), not reference identity. The ServerName
    // each region server reports is a distinct object from the one returned by
    // getServerHoldingMeta(), so the original 'rs != metaServer' was always true and could
    // select the meta server itself as the "non-meta" test server.
    ServerName testServer = testUtil.getHBaseCluster().getRegionServerThreads().stream().map(rs -> rs.getRegionServer().getServerName()).filter(rs -> !rs.equals(metaServer)).findAny().get();
    // Split all non-meta WALs of the chosen non-meta server.
    List<Procedure> procedures = splitWALManager.splitWALs(testServer, false);
    Assert.assertEquals(1, procedures.size());
    ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
    Assert.assertEquals(0, splitWALManager.getWALsToSplit(testServer, false).size());
    // Validate the old WAL file archive dir
    Path walRootDir = hmaster.getMasterFileSystem().getWALRootDir();
    Path walArchivePath = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    FileSystem walFS = hmaster.getMasterFileSystem().getWALFileSystem();
    int archiveFileCount = walFS.listStatus(walArchivePath).length;
    // Now split only the meta WAL of the meta-hosting server.
    procedures = splitWALManager.splitWALs(metaServer, true);
    Assert.assertEquals(1, procedures.size());
    ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0));
    Assert.assertEquals(0, splitWALManager.getWALsToSplit(metaServer, true).size());
    Assert.assertEquals(1, splitWALManager.getWALsToSplit(metaServer, false).size());
    // There should be archiveFileCount + 1 WALs after SplitWALProcedure finish
    Assert.assertEquals("Splitted WAL files should be archived", archiveFileCount + 1, walFS.listStatus(walArchivePath).length);
}
Also used : StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) ProcedureSuspendedException(org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) HBASE_SPLIT_WAL_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK) FileStatus(org.apache.hadoop.fs.FileStatus) ServerProcedureInterface(org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface) ArrayList(java.util.ArrayList) HConstants(org.apache.hadoop.hbase.HConstants) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) ProcedureYieldException(org.apache.hadoop.hbase.procedure2.ProcedureYieldException) ProcedureStateSerializer(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer) After(org.junit.After) Path(org.apache.hadoop.fs.Path) ClassRule(org.junit.ClassRule) MasterProcedureProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) ServerName(org.apache.hadoop.hbase.ServerName) SPLIT_WAL(org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType.SPLIT_WAL) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) CommonFSUtils(org.apache.hadoop.hbase.util.CommonFSUtils) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) Logger(org.slf4j.Logger) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) HBASE_SPLIT_WAL_MAX_SPLITTER(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) ProcedureTestingUtility(org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) 
MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Category(org.junit.experimental.categories.Category) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) Lists(org.apache.hbase.thirdparty.com.google.common.collect.Lists) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) Assert(org.junit.Assert) Path(org.apache.hadoop.fs.Path) ServerName(org.apache.hadoop.hbase.ServerName) FileSystem(org.apache.hadoop.fs.FileSystem) StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)

Example 44 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Source: method testSplitTableRegion of the class TestRegionSplit.

/**
 * Splits a table region via SplitTableRegionProcedure, then verifies the split survives a
 * full master restart: after disabling the table, restarting the master and re-enabling
 * the table, both daughter regions must exist and be assigned.
 */
@Test
public void testSplitTableRegion() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName);
    insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName);
    // Split in the middle of the inserted row range.
    int splitRowNum = startRowNum + rowCount / 2;
    byte[] splitKey = Bytes.toBytes("" + splitRowNum);
    // Use assertNotNull/assertEquals (consistent with testSplitStoreFiles) so failures
    // report the actual value instead of a bare "assertion failed".
    assertNotNull("not able to find a splittable region", regions);
    assertEquals("not able to find a splittable region", 1, regions.length);
    // Split region of the table
    long procId = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
    // Wait the completion
    ProcedureTestingUtility.waitProcedure(procExec, procId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
    assertEquals("not able to split table", 2, UTIL.getHBaseCluster().getRegions(tableName).size());
    // disable table
    UTIL.getAdmin().disableTable(tableName);
    // NOTE(review): the fixed sleeps below make this test timing-sensitive; consider
    // replacing them with UTIL.waitFor on explicit conditions.
    Thread.sleep(500);
    // stop master
    UTIL.getHBaseCluster().stopMaster(0);
    UTIL.getHBaseCluster().waitOnMaster(0);
    Thread.sleep(500);
    // restart master (the returned MasterThread handle was unused, so it is not kept)
    UTIL.getHBaseCluster().startMaster();
    Thread.sleep(500);
    UTIL.invalidateConnection();
    // enable table
    UTIL.getAdmin().enableTable(tableName);
    Thread.sleep(500);
    List<HRegion> tableRegions = UTIL.getHBaseCluster().getRegions(tableName);
    assertEquals("Table region not correct.", 2, tableRegions.size());
    // Both daughters should currently be assigned to the same server.
    Map<RegionInfo, ServerName> regionInfoMap = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates().getRegionAssignments();
    assertEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()), regionInfoMap.get(tableRegions.get(1).getRegionInfo()));
}
Also used : MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)

Example 45 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Source: method testSplitStoreFiles of the class TestRegionSplit.

/**
 * Splits a region at its very first row and checks store-file accounting: the table has
 * exactly one HFile before the split, and the daughter regions together still reference
 * exactly one store file afterwards.
 */
@Test
public void testSplitStoreFiles() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName);
    // Insert with flush=true so the data lands in an actual HFile, not just the memstore.
    insertData(UTIL, tableName, rowCount, startRowNum, true, columnFamilyName);
    // Count HFiles across every region of the table before splitting.
    int preSplitStoreFileCount = 0;
    for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) {
        preSplitStoreFileCount += r.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size();
    }
    assertEquals(1, preSplitStoreFileCount);
    // Split exactly at the first row of the inserted data.
    byte[] firstRowKey = Bytes.toBytes("" + startRowNum);
    assertNotNull("Not able to find a splittable region", regions);
    assertEquals("Not able to find a splittable region", 1, regions.length);
    long splitProcId = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], firstRowKey));
    // Block until the split procedure completes, then verify it succeeded.
    ProcedureTestingUtility.waitProcedure(procExec, splitProcId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, splitProcId);
    assertEquals("Not able to split table", 2, UTIL.getHBaseCluster().getRegions(tableName).size());
    // The daughters together must still reference exactly one store file.
    int postSplitStoreFileCount = 0;
    for (HRegion r : UTIL.getHBaseCluster().getRegions(tableName)) {
        postSplitStoreFileCount += r.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size();
    }
    assertEquals(1, postSplitStoreFileCount);
    List<HRegion> daughterRegions = UTIL.getHBaseCluster().getRegions(tableName);
    assertEquals("Table region not correct.", 2, daughterRegions.size());
    // Both daughters should currently be assigned to the same server.
    Map<RegionInfo, ServerName> assignments = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates().getRegionAssignments();
    assertEquals(assignments.get(daughterRegions.get(0).getRegionInfo()), assignments.get(daughterRegions.get(1).getRegionInfo()));
}
Also used : MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)

Aggregations

MasterProcedureEnv (org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)65 Test (org.junit.Test)48 TableName (org.apache.hadoop.hbase.TableName)42 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)39 IOException (java.io.IOException)16 ProcedureExecutor (org.apache.hadoop.hbase.procedure2.ProcedureExecutor)16 ArrayList (java.util.ArrayList)13 Path (org.apache.hadoop.fs.Path)13 HBaseClassTestRule (org.apache.hadoop.hbase.HBaseClassTestRule)13 MasterTests (org.apache.hadoop.hbase.testclassification.MasterTests)13 Bytes (org.apache.hadoop.hbase.util.Bytes)13 ClassRule (org.junit.ClassRule)13 Category (org.junit.experimental.categories.Category)13 BeforeClass (org.junit.BeforeClass)12 HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)11 ServerName (org.apache.hadoop.hbase.ServerName)10 Procedure (org.apache.hadoop.hbase.procedure2.Procedure)10 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)10 MediumTests (org.apache.hadoop.hbase.testclassification.MediumTests)10 AfterClass (org.junit.AfterClass)10