Example 26 with MasterCoprocessorHost

Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.

The class MasterFlushTableProcedureManager, method execProcedure.

@Override
public void execProcedure(ProcedureDescription desc) throws IOException {
    TableName tableName = TableName.valueOf(desc.getInstance());
    // call pre coproc hook
    MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
    if (cpHost != null) {
        cpHost.preTableFlush(tableName);
    }
    // Get the list of region servers that host the online regions for table.
    // We use the procedure instance name to carry the table name from the client.
    // It is possible that regions may move after we get the region server list.
    // Each region server will get its own online regions for the table.
    // We may still miss regions that need to be flushed.
    List<Pair<HRegionInfo, ServerName>> regionsAndLocations;
    if (TableName.META_TABLE_NAME.equals(tableName)) {
        regionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations(master.getZooKeeper());
    } else {
        regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName, false);
    }
    Set<String> regionServers = new HashSet<>(regionsAndLocations.size());
    for (Pair<HRegionInfo, ServerName> region : regionsAndLocations) {
        if (region != null && region.getFirst() != null && region.getSecond() != null) {
            HRegionInfo hri = region.getFirst();
            if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent()))
                continue;
            regionServers.add(region.getSecond().toString());
        }
    }
    ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance());
    // Kick off the global procedure from the master coordinator to the region servers.
    // We rely on the existing Distributed Procedure framework to prevent any concurrent
    // procedure with the same name.
    Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), new byte[0], Lists.newArrayList(regionServers));
    monitor.rethrowException();
    if (proc == null) {
        String msg = "Failed to submit distributed procedure " + desc.getSignature() + " for '" + desc.getInstance() + "'. " + "Another flush procedure is running?";
        LOG.error(msg);
        throw new IOException(msg);
    }
    procMap.put(tableName, proc);
    try {
        // wait for the procedure to complete.  A timer thread is kicked off that should cancel this
        // if it takes too long.
        proc.waitForCompleted();
        LOG.info("Done waiting - exec procedure " + desc.getSignature() + " for '" + desc.getInstance() + "'");
        LOG.info("Master flush table procedure is successful!");
    } catch (InterruptedException e) {
        ForeignException ee = new ForeignException("Interrupted while waiting for flush table procedure to finish", e);
        monitor.receive(ee);
        Thread.currentThread().interrupt();
    } catch (ForeignException e) {
        ForeignException ee = new ForeignException("Exception while waiting for flush table procedure to finish", e);
        monitor.receive(ee);
    }
    monitor.rethrowException();
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) IOException(java.io.IOException) ForeignExceptionDispatcher(org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) MetaTableLocator(org.apache.hadoop.hbase.zookeeper.MetaTableLocator) ServerName(org.apache.hadoop.hbase.ServerName) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) Procedure(org.apache.hadoop.hbase.procedure.Procedure) Pair(org.apache.hadoop.hbase.util.Pair) HashSet(java.util.HashSet)
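
The cpHost.preTableFlush(tableName) call above fans out to every MasterObserver loaded on the master, so a coprocessor can audit or veto the flush before the distributed procedure starts. The sketch below is not from the HBase sources: the class name FlushAuditObserver is made up, and it uses the pre-2.0 observer API (BaseMasterObserver) that this code line still ships.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer; MasterCoprocessorHost.preTableFlush/postTableFlush invoke these hooks.
public class FlushAuditObserver extends BaseMasterObserver {

    private static final Log LOG = LogFactory.getLog(FlushAuditObserver.class);

    @Override
    public void preTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
            TableName tableName) throws IOException {
        // Runs before the flush procedure is submitted; throwing here fails the flush request.
        LOG.info("About to flush all regions of " + tableName);
    }

    @Override
    public void postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx,
            TableName tableName) throws IOException {
        // Runs after the master-side procedure completes.
        LOG.info("Flush procedure finished for " + tableName);
    }
}

To have the master pick such an observer up at startup, list it under hbase.coprocessor.master.classes in hbase-site.xml.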

Example 27 with MasterCoprocessorHost

Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.

The class MergeTableRegionsProcedure, method preMergeRegionsCommit.

/**
   * Pre merge regions commit action
   * @param env MasterProcedureEnv
   **/
private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        @MetaMutationAnnotation final List<Mutation> metaEntries = new ArrayList<>();
        boolean ret = cpHost.preMergeRegionsCommit(regionsToMerge, metaEntries, getUser());
        if (ret) {
            throw new IOException("Coprocessor bypassing regions " + getRegionsToMergeListFullNameString() + " merge.");
        }
        try {
            for (Mutation p : metaEntries) {
                HRegionInfo.parseRegionName(p.getRow());
            }
        } catch (IOException e) {
            LOG.error("Row key of mutation from coprocessor is not parsable as region name. " + "Mutations from coprocessor should only be for hbase:meta table.", e);
            throw e;
        }
    }
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) MetaMutationAnnotation(org.apache.hadoop.hbase.MetaMutationAnnotation) ArrayList(java.util.ArrayList) Mutation(org.apache.hadoop.hbase.client.Mutation) InterruptedIOException(java.io.InterruptedIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException)
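
The parseRegionName check above exists because any Mutation the coprocessor hands back through metaEntries is applied to hbase:meta, where row keys are region names. A small standalone sketch (not part of the HBase sources; the class name MetaMutationRowCheck is made up) shows what the check accepts and rejects:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaMutationRowCheck {

    public static void main(String[] args) {
        // A region name row key, e.g. "demo,aaa,<timestamp>.<encoded>.", parses cleanly.
        HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo"),
                Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
        Put good = new Put(hri.getRegionName());
        // An arbitrary row key does not, so the merge procedure would rethrow the IOException.
        Put bad = new Put(Bytes.toBytes("not-a-region-name"));
        System.out.println(check(good));
        System.out.println(check(bad));
    }

    private static String check(Put p) {
        try {
            HRegionInfo.parseRegionName(p.getRow());
            return "row parses as a region name";
        } catch (IOException e) {
            return "rejected: " + e.getMessage();
        }
    }
}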

Example 28 with MasterCoprocessorHost

Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.

The class TestMasterCoprocessorExceptionWithAbort, method testExceptionFromCoprocessorWhenCreatingTable.

@Test(timeout = 30000)
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    BuggyMasterObserver cp = (BuggyMasterObserver) host.findCoprocessor(BuggyMasterObserver.class.getName());
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // set a watch on the zookeeper /hbase/master node. If the master dies,
    // the node will be deleted.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK master tracker error, why=" + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    masterTracker.start();
    zkw.registerListener(masterTracker);
    // Test (part of the) output that should have been printed by master when it aborts:
    // (namely the part that shows the set of loaded coprocessors).
    // In this test, there is only a single coprocessor (BuggyMasterObserver).
    assertTrue(HMaster.getLoadedCoprocessors().contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
    CreateTableThread createTableThread = new CreateTableThread(UTIL);
    // Attempting to create a table (using createTableThread above) triggers an NPE in BuggyMasterObserver.
    // Master will then abort and the /hbase/master zk node will be deleted.
    createTableThread.start();
    // Wait up to 30 seconds for master's /hbase/master zk node to go away after master aborts.
    for (int i = 0; i < 30; i++) {
        if (masterTracker.masterZKNodeWasDeleted) {
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            fail("InterruptedException while waiting for master zk node to " + "be deleted.");
        }
    }
    assertTrue("Master aborted on coprocessor exception, as expected.", masterTracker.masterZKNodeWasDeleted);
    createTableThread.interrupt();
    try {
        createTableThread.join(1000);
    } catch (InterruptedException e) {
        assertTrue("Ignoring InterruptedException while waiting for " + " createTableThread.join().", true);
    }
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Abortable(org.apache.hadoop.hbase.Abortable) HMaster(org.apache.hadoop.hbase.master.HMaster) Test(org.junit.Test)
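
BuggyMasterObserver's source is not shown here, but the abort path it exercises is the standard one: an exception that escapes a master coprocessor makes the host abort the master, because hbase.coprocessor.abortonerror defaults to true. A minimal sketch of such an observer, assuming it fails in postCreateTable and using the pre-2.0 observer API (the class name FailingCreateTableObserver is made up), might look like this:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical stand-in for BuggyMasterObserver: deliberately fails after table creation.
public class FailingCreateTableObserver extends BaseMasterObserver {

    private volatile boolean createTableCalled = false;

    @Override
    public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
            HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
        createTableCalled = true;
        // The unchecked exception propagates out of the coprocessor host and, with
        // hbase.coprocessor.abortonerror=true (the default), aborts the master.
        throw new NullPointerException("deliberate failure for the abort test");
    }

    public boolean wasCreateTableCalled() {
        return createTableCalled;
    }
}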

Example 29 with MasterCoprocessorHost

Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.

The class TestMasterObserver, method testSnapshotOperations.

@Test(timeout = 180000)
public void testSnapshotOperations() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
    cp.resetStates();
    // create a table
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    Admin admin = UTIL.getAdmin();
    tableCreationLatch = new CountDownLatch(1);
    admin.createTable(htd);
    tableCreationLatch.await();
    tableCreationLatch = new CountDownLatch(1);
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    try {
        // Test snapshot operation
        assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled());
        admin.snapshot(TEST_SNAPSHOT, tableName);
        assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled());
        // Test list operation
        admin.listSnapshots();
        assertTrue("Coprocessor should have been called on snapshot list", cp.wasListSnapshotCalled());
        // Test clone operation
        admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
        assertTrue("Coprocessor should have been called on snapshot clone", cp.wasCloneSnapshotCalled());
        assertFalse("Coprocessor restore should not have been called on snapshot clone", cp.wasRestoreSnapshotCalled());
        admin.disableTable(TEST_CLONE);
        assertTrue(admin.isTableDisabled(TEST_CLONE));
        deleteTable(admin, TEST_CLONE);
        // Test restore operation
        cp.resetStates();
        admin.restoreSnapshot(TEST_SNAPSHOT);
        assertTrue("Coprocessor should have been called on snapshot restore", cp.wasRestoreSnapshotCalled());
        assertFalse("Coprocessor clone should not have been called on snapshot restore", cp.wasCloneSnapshotCalled());
        admin.deleteSnapshot(TEST_SNAPSHOT);
        assertTrue("Coprocessor should have been called on snapshot delete", cp.wasDeleteSnapshotCalled());
    } finally {
        deleteTable(admin, tableName);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HMaster(org.apache.hadoop.hbase.master.HMaster) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) CountDownLatch(java.util.concurrent.CountDownLatch) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
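
The admin calls in this test are the same ones an ordinary client would issue; each reaches the master and fires the matching MasterObserver hook (preSnapshot, preCloneSnapshot, preRestoreSnapshot, preDeleteSnapshot, and their post counterparts). A minimal standalone sketch, not from the HBase sources and with placeholder table and snapshot names, could drive the same lifecycle:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotLifecycleExample {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumes a table named "demo" already exists in the cluster.
        TableName table = TableName.valueOf("demo");
        TableName clone = TableName.valueOf("demo_clone");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.snapshot("demo_snapshot", table);       // take the snapshot
            admin.listSnapshots();                        // list existing snapshots
            admin.cloneSnapshot("demo_snapshot", clone);  // clone into a new table
            admin.disableTable(table);                    // restore requires a disabled table
            admin.restoreSnapshot("demo_snapshot");       // roll the table back
            admin.enableTable(table);
            admin.deleteSnapshot("demo_snapshot");        // clean up
        }
    }
}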

Example 30 with MasterCoprocessorHost

Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.

The class TestAccessController2, method testCoprocessorLoading.

@Test(timeout = 180000)
public void testCoprocessorLoading() throws Exception {
    MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
    cpHost.load(MyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
    AccessController ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(MyAccessController.class.getName());
    MasterCoprocessorEnvironment CP_ENV = cpHost.createEnvironment(MyAccessController.class, ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
    RegionServerCoprocessorEnvironment RSCP_ENV = rsHost.createEnvironment(MyAccessController.class, ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) RegionServerCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment) Test(org.junit.Test)
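
Loading MyAccessController through cpHost.load() here is a test-only shortcut. In a real deployment the AccessController usually reaches the MasterCoprocessorHost (and its region and region-server counterparts) through configuration. The sketch below just sets the relevant keys on a Configuration object; the class name AccessControllerConfigExample is made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessController;

public class AccessControllerConfigExample {

    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Coprocessors listed here are loaded at startup by the matching coprocessor hosts.
        conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
        conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
        conf.set("hbase.coprocessor.regionserver.classes", AccessController.class.getName());
        // Turn on authorization checks; the same keys can also go into hbase-site.xml.
        conf.setBoolean("hbase.security.authorization", true);
        conf.setBoolean("hbase.security.exec.permission.checks", true);
        System.out.println("Master coprocessors: " + conf.get("hbase.coprocessor.master.classes"));
    }
}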

Aggregations

MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost): 37
Test (org.junit.Test): 15
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 12
TableName (org.apache.hadoop.hbase.TableName): 11
HMaster (org.apache.hadoop.hbase.master.HMaster): 11
IOException (java.io.IOException): 9
RegionServerCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 6
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6
BeforeClass (org.junit.BeforeClass): 6
Admin (org.apache.hadoop.hbase.client.Admin): 5
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 4
ArrayList (java.util.ArrayList): 3
Configuration (org.apache.hadoop.conf.Configuration): 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3
MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem): 3
InterruptedIOException (java.io.InterruptedIOException): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
Path (org.apache.hadoop.fs.Path): 2