Search in sources :

Example 26 with ProcedureExecutor

use of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in project hbase by apache.

Source: class TestSplitWALProcedure, method testHandleDeadWorker.

/**
 * Verifies that a SplitWALProcedure survives the death of its assigned split worker:
 * after the worker region server is killed, the procedure must pick a new worker and
 * still complete successfully.
 */
@Test
public void testHandleDeadWorker() throws Exception {
    // Create a table and load it repeatedly so the server we kill leaves a WAL to split.
    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
    for (int i = 0; i < 10; i++) {
        TEST_UTIL.loadTable(table, FAMILY);
    }
    HRegionServer testServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
    ProcedureExecutor<MasterProcedureEnv> masterPE = master.getMasterProcedureExecutor();
    List<FileStatus> wals = splitWALManager.getWALsToSplit(testServer.getServerName(), false);
    Assert.assertEquals(1, wals.size());
    // Kill the WAL's owner; the master should schedule a SplitWALProcedure for it.
    TEST_UTIL.getHBaseCluster().killRegionServer(testServer.getServerName());
    TEST_UTIL.waitFor(30000,
        () -> master.getProcedures().stream().anyMatch(procedure -> procedure instanceof SplitWALProcedure));
    // Map to the concrete type once instead of re-casting at every use (the original used the
    // raw Procedure type). orElseThrow replaces an unchecked Optional.get() plus a dead
    // assertNotNull: the waitFor above guarantees the procedure exists.
    SplitWALProcedure splitWALProcedure = master.getProcedures().stream()
        .filter(procedure -> procedure instanceof SplitWALProcedure)
        .map(procedure -> (SplitWALProcedure) procedure)
        .findAny()
        .orElseThrow(() -> new AssertionError("no SplitWALProcedure scheduled"));
    // Wait for a split worker to be acquired, then kill that worker too.
    TEST_UTIL.waitFor(5000, () -> splitWALProcedure.getWorker() != null);
    TEST_UTIL.getHBaseCluster().killRegionServer(splitWALProcedure.getWorker());
    // The procedure must recover from the dead worker and finish successfully.
    ProcedureTestingUtility.waitProcedure(masterPE, splitWALProcedure.getProcId());
    Assert.assertTrue(splitWALProcedure.isSuccess());
    ProcedureTestingUtility.waitAllProcedures(masterPE);
}
Also used : HBASE_SPLIT_WAL_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK) FileStatus(org.apache.hadoop.fs.FileStatus) HConstants(org.apache.hadoop.hbase.HConstants) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) After(org.junit.After) ClassRule(org.junit.ClassRule) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) HBASE_SPLIT_WAL_MAX_SPLITTER(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) ProcedureTestingUtility(org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) SplitWALManager(org.apache.hadoop.hbase.master.SplitWALManager) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) List(java.util.List) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) Optional(java.util.Optional) Table(org.apache.hadoop.hbase.client.Table) Assert(org.junit.Assert) HMaster(org.apache.hadoop.hbase.master.HMaster) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)

Example 27 with ProcedureExecutor

use of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in project hbase by apache.

Source: class MasterProcedureTestingUtility, method restartMasterProcedureExecutor.

/**
 * Restarts the given master procedure executor, simulating a master restart: stop the
 * AssignmentManager and mark the master uninitialized, re-seed regions-in-transition from
 * the still-active TransitRegionStateProcedures, then rejoin the cluster.
 */
public static void restartMasterProcedureExecutor(ProcedureExecutor<MasterProcedureEnv> procExec) throws Exception {
    final MasterProcedureEnv env = procExec.getEnvironment();
    final HMaster master = (HMaster) env.getMasterServices();
    // stop services
    final Callable<Void> stopServices = () -> {
        AssignmentManager am = env.getAssignmentManager();
        // try to simulate a master restart by removing the ServerManager states about seqIDs
        for (RegionState regionState : am.getRegionStates().getRegionStates()) {
            env.getMasterServices().getServerManager().removeRegion(regionState.getRegion());
        }
        am.stop();
        master.setInitialized(false);
        return null;
    };
    // setup RIT before starting workers
    final Callable<Void> setupRIT = () -> {
        AssignmentManager am = env.getAssignmentManager();
        am.start();
        // just follow the same way with HMaster.finishActiveMasterInitialization. See the
        // comments there
        am.setupRIT(procExec.getActiveProceduresNoCopy().stream()
            .filter(p -> !p.isSuccess())
            .filter(p -> p instanceof TransitRegionStateProcedure)
            .map(p -> (TransitRegionStateProcedure) p)
            .collect(Collectors.toList()));
        return null;
    };
    // restart services
    final Callable<Void> restartServices = () -> {
        AssignmentManager am = env.getAssignmentManager();
        try {
            am.joinCluster();
            am.wakeMetaLoadedEvent();
            master.setInitialized(true);
        } catch (Exception e) {
            LOG.warn("Failed to load meta", e);
        }
        return null;
    };
    ProcedureTestingUtility.restart(procExec, true, true, stopServices, setupRIT, restartServices);
}
Also used : CatalogFamilyFormat(org.apache.hadoop.hbase.CatalogFamilyFormat) Result(org.apache.hadoop.hbase.client.Result) FileSystem(org.apache.hadoop.fs.FileSystem) RegionLocations(org.apache.hadoop.hbase.RegionLocations) LoggerFactory(org.slf4j.LoggerFactory) RegionState(org.apache.hadoop.hbase.master.RegionState) MD5Hash(org.apache.hadoop.hbase.util.MD5Hash) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Path(org.apache.hadoop.fs.Path) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) Durability(org.apache.hadoop.hbase.client.Durability) TableStateManager(org.apache.hadoop.hbase.master.TableStateManager) CommonFSUtils(org.apache.hadoop.hbase.util.CommonFSUtils) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Collectors(java.util.stream.Collectors) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) StoreFileTrackerFactory(org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) TRACKER_IMPL(org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) HMaster(org.apache.hadoop.hbase.master.HMaster) FSUtils(org.apache.hadoop.hbase.util.FSUtils) StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) ModifyRegionUtils(org.apache.hadoop.hbase.util.ModifyRegionUtils) Callable(java.util.concurrent.Callable) SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) TableState(org.apache.hadoop.hbase.client.TableState) TreeSet(java.util.TreeSet) 
TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) ServerName(org.apache.hadoop.hbase.ServerName) Bytes(org.apache.hadoop.hbase.util.Bytes) TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) Put(org.apache.hadoop.hbase.client.Put) Assert.assertTrue(org.junit.Assert.assertTrue) ProcedureTestingUtility(org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility) IOException(java.io.IOException) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) ClientMetaTableAccessor(org.apache.hadoop.hbase.ClientMetaTableAccessor) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Admin(org.apache.hadoop.hbase.client.Admin) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) Connection(org.apache.hadoop.hbase.client.Connection) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Assert.assertEquals(org.junit.Assert.assertEquals) RegionState(org.apache.hadoop.hbase.master.RegionState) TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) HMaster(org.apache.hadoop.hbase.master.HMaster) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) IOException(java.io.IOException)

Example 28 with ProcedureExecutor

use of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in project hbase by apache.

Source: class TestSplitWALManager, method testCreateSplitWALProcedures.

/**
 * Exercises SplitWALManager.createSplitWALProcedures for both the meta WAL and a plain
 * WAL of the server holding meta, and checks each split deletes its source WAL file.
 */
@Test
public void testCreateSplitWALProcedures() throws Exception {
    TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
    // load table
    TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY);
    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    ServerName serverHoldingMeta = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
    Path walDir = new Path(TEST_UTIL.getDefaultRootDirPath(),
        AbstractFSWALProvider.getWALDirectoryName(serverHoldingMeta.toString()));
    // Test splitting meta wal
    FileStatus[] metaWals = TEST_UTIL.getTestFileSystem().listStatus(walDir, MasterWalManager.META_FILTER);
    Assert.assertEquals(1, metaWals.length);
    List<Procedure> metaProcedures =
        splitWALManager.createSplitWALProcedures(Lists.newArrayList(metaWals[0]), serverHoldingMeta);
    Assert.assertEquals(1, metaProcedures.size());
    ProcedureTestingUtility.submitAndWait(procExec, metaProcedures.get(0));
    // The source WAL file must be gone once the split procedure completes.
    Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(metaWals[0].getPath()));
    // Test splitting wal
    FileStatus[] plainWals = TEST_UTIL.getTestFileSystem().listStatus(walDir, MasterWalManager.NON_META_FILTER);
    Assert.assertEquals(1, plainWals.length);
    List<Procedure> plainProcedures =
        splitWALManager.createSplitWALProcedures(Lists.newArrayList(plainWals[0]), serverHoldingMeta);
    Assert.assertEquals(1, plainProcedures.size());
    ProcedureTestingUtility.submitAndWait(procExec, plainProcedures.get(0));
    Assert.assertFalse(TEST_UTIL.getTestFileSystem().exists(plainWals[0].getPath()));
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ServerName(org.apache.hadoop.hbase.ServerName) StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Test(org.junit.Test)

Example 29 with ProcedureExecutor

use of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in project hbase by apache.

Source: class TestSplitWALManager, method splitLogsTestHelper.

/**
 * Shared helper: splits the WALs of a non-meta server and then the meta WAL of the
 * meta-hosting server, asserting the split WALs end up in the archive directory.
 */
private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception {
    HMaster hmaster = testUtil.getHBaseCluster().getMaster();
    SplitWALManager splitWALManager = hmaster.getSplitWALManager();
    LOG.info("The Master FS is pointing to: " + hmaster.getMasterFileSystem().getFileSystem().getUri());
    LOG.info("The WAL FS is pointing to: " + hmaster.getMasterFileSystem().getWALFileSystem().getUri());
    testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE);
    // load table
    testUtil.loadTable(testUtil.getConnection().getTable(TABLE_NAME), FAMILY);
    ProcedureExecutor<MasterProcedureEnv> procExec = hmaster.getMasterProcedureExecutor();
    ServerName serverWithMeta = testUtil.getHBaseCluster().getServerHoldingMeta();
    // Pick any region server that is not the one holding meta.
    ServerName plainServer = testUtil.getHBaseCluster().getRegionServerThreads().stream()
        .map(rs -> rs.getRegionServer().getServerName())
        .filter(rs -> rs != serverWithMeta)
        .findAny()
        .get();
    // Split the plain server's WALs (splitMeta = false).
    List<Procedure> splitProcedures = splitWALManager.splitWALs(plainServer, false);
    Assert.assertEquals(1, splitProcedures.size());
    ProcedureTestingUtility.submitAndWait(procExec, splitProcedures.get(0));
    Assert.assertEquals(0, splitWALManager.getWALsToSplit(plainServer, false).size());
    // Validate the old WAL file archive dir
    Path walRootDir = hmaster.getMasterFileSystem().getWALRootDir();
    Path walArchivePath = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    FileSystem walFS = hmaster.getMasterFileSystem().getWALFileSystem();
    int archiveFileCount = walFS.listStatus(walArchivePath).length;
    // Split only the meta WAL of the meta-hosting server (splitMeta = true).
    splitProcedures = splitWALManager.splitWALs(serverWithMeta, true);
    Assert.assertEquals(1, splitProcedures.size());
    ProcedureTestingUtility.submitAndWait(procExec, splitProcedures.get(0));
    Assert.assertEquals(0, splitWALManager.getWALsToSplit(serverWithMeta, true).size());
    Assert.assertEquals(1, splitWALManager.getWALsToSplit(serverWithMeta, false).size());
    // There should be archiveFileCount + 1 WALs after SplitWALProcedure finish
    Assert.assertEquals("Splitted WAL files should be archived", archiveFileCount + 1,
        walFS.listStatus(walArchivePath).length);
}
Also used : StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) ProcedureSuspendedException(org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) HBASE_SPLIT_WAL_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK) FileStatus(org.apache.hadoop.fs.FileStatus) ServerProcedureInterface(org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface) ArrayList(java.util.ArrayList) HConstants(org.apache.hadoop.hbase.HConstants) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) ProcedureYieldException(org.apache.hadoop.hbase.procedure2.ProcedureYieldException) ProcedureStateSerializer(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer) After(org.junit.After) Path(org.apache.hadoop.fs.Path) ClassRule(org.junit.ClassRule) MasterProcedureProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) ServerName(org.apache.hadoop.hbase.ServerName) SPLIT_WAL(org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType.SPLIT_WAL) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) CommonFSUtils(org.apache.hadoop.hbase.util.CommonFSUtils) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) Logger(org.slf4j.Logger) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) HBASE_SPLIT_WAL_MAX_SPLITTER(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) ProcedureTestingUtility(org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) 
MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Category(org.junit.experimental.categories.Category) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) Lists(org.apache.hbase.thirdparty.com.google.common.collect.Lists) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) Assert(org.junit.Assert) Path(org.apache.hadoop.fs.Path) ServerName(org.apache.hadoop.hbase.ServerName) FileSystem(org.apache.hadoop.fs.FileSystem) StateMachineProcedure(org.apache.hadoop.hbase.procedure2.StateMachineProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)

Example 30 with ProcedureExecutor

use of org.apache.hadoop.hbase.procedure2.ProcedureExecutor in project hbase by apache.

Source: class TestRaceBetweenSCPAndDTP, method test.

/**
 * Races a ServerCrashProcedure against a DisableTableProcedure: disable a table, block the
 * DTP at a latch, crash the region's server, let the SCP finish, then release the DTP and
 * verify it also completes.
 */
@Test
public void test() throws Exception {
    RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo();
    AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
    ServerName sn = am.getRegionStates().getRegionState(region).getServerName();
    LOG.info("ServerName={}, region={}", sn, region);
    ARRIVE_GET_REGIONS_ON_TABLE = new CountDownLatch(1);
    RESUME_GET_REGIONS_ON_SERVER = new CountDownLatch(1);
    // Assign to local variable because this static gets set to null in above running thread and
    // so NPE.
    CountDownLatch cdl = ARRIVE_GET_REGIONS_ON_TABLE;
    UTIL.getAdmin().disableTableAsync(NAME);
    cdl.await();
    ProcedureExecutor<?> procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
    UTIL.getMiniHBaseCluster().stopRegionServer(sn);
    // Poll until the ServerCrashProcedure has been scheduled and capture its pid.
    // BUG FIX: the loop must continue WHILE no SCP pid has been found yet (== NO_PROC_ID).
    // The original `!=` condition exited on the first poll that found no SCP, leaving
    // scppid == NO_PROC_ID so the waitFor below never tracked the real SCP.
    long pid = Procedure.NO_PROC_ID;
    do {
        Threads.sleep(1);
        pid = getSCPPID(procExec);
    } while (pid == Procedure.NO_PROC_ID);
    final long scppid = pid;
    UTIL.waitFor(60000, () -> procExec.isFinished(scppid));
    // SCP is done; now let the blocked DisableTableProcedure proceed and finish.
    RESUME_GET_REGIONS_ON_SERVER.countDown();
    long dtpProcId = procExec.getProcedures().stream()
        .filter(p -> p instanceof DisableTableProcedure)
        .map(p -> (DisableTableProcedure) p)
        .findAny()
        .get()
        .getProcId();
    UTIL.waitFor(60000, () -> procExec.isFinished(dtpProcId));
}
Also used : BeforeClass(org.junit.BeforeClass) LoggerFactory(org.slf4j.LoggerFactory) HConstants(org.apache.hadoop.hbase.HConstants) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) Configuration(org.apache.hadoop.conf.Configuration) MasterServices(org.apache.hadoop.hbase.master.MasterServices) ClassRule(org.junit.ClassRule) ServerName(org.apache.hadoop.hbase.ServerName) Threads(org.apache.hadoop.hbase.util.Threads) Bytes(org.apache.hadoop.hbase.util.Bytes) TableName(org.apache.hadoop.hbase.TableName) MasterRegion(org.apache.hadoop.hbase.master.region.MasterRegion) AfterClass(org.junit.AfterClass) Logger(org.slf4j.Logger) KeeperException(org.apache.zookeeper.KeeperException) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) CountDownLatch(java.util.concurrent.CountDownLatch) DisableTableProcedure(org.apache.hadoop.hbase.master.procedure.DisableTableProcedure) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) Optional(java.util.Optional) ServerCrashProcedure(org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HMaster(org.apache.hadoop.hbase.master.HMaster) ServerName(org.apache.hadoop.hbase.ServerName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) CountDownLatch(java.util.concurrent.CountDownLatch) DisableTableProcedure(org.apache.hadoop.hbase.master.procedure.DisableTableProcedure) Test(org.junit.Test)

Aggregations

Test (org.junit.Test)25 ProcedureExecutor (org.apache.hadoop.hbase.procedure2.ProcedureExecutor)24 TableName (org.apache.hadoop.hbase.TableName)22 MasterProcedureEnv (org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)22 HBaseClassTestRule (org.apache.hadoop.hbase.HBaseClassTestRule)17 MasterTests (org.apache.hadoop.hbase.testclassification.MasterTests)17 Bytes (org.apache.hadoop.hbase.util.Bytes)17 ClassRule (org.junit.ClassRule)17 Category (org.junit.experimental.categories.Category)17 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)16 IOException (java.io.IOException)15 HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)15 Procedure (org.apache.hadoop.hbase.procedure2.Procedure)14 BeforeClass (org.junit.BeforeClass)14 MediumTests (org.apache.hadoop.hbase.testclassification.MediumTests)12 AfterClass (org.junit.AfterClass)12 HMaster (org.apache.hadoop.hbase.master.HMaster)11 Configuration (org.apache.hadoop.conf.Configuration)10 Logger (org.slf4j.Logger)10 LoggerFactory (org.slf4j.LoggerFactory)10