use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
the class MasterFlushTableProcedureManager method execProcedure.
@Override
public void execProcedure(ProcedureDescription desc) throws IOException {
  TableName tableName = TableName.valueOf(desc.getInstance());
  // call pre coproc hook
  MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
  if (cpHost != null) {
    cpHost.preTableFlush(tableName);
  }
  // Get the list of region servers that host the online regions for the table.
  // We use the procedure instance name to carry the table name from the client.
  // It is possible that regions may move after we get the region server list.
  // Each region server will get its own online regions for the table.
  // We may still miss regions that need to be flushed.
  List<Pair<HRegionInfo, ServerName>> regionsAndLocations;
  if (TableName.META_TABLE_NAME.equals(tableName)) {
    regionsAndLocations =
        new MetaTableLocator().getMetaRegionsAndLocations(master.getZooKeeper());
  } else {
    regionsAndLocations =
        MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName, false);
  }
  Set<String> regionServers = new HashSet<>(regionsAndLocations.size());
  for (Pair<HRegionInfo, ServerName> region : regionsAndLocations) {
    if (region != null && region.getFirst() != null && region.getSecond() != null) {
      HRegionInfo hri = region.getFirst();
      if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
        continue;
      }
      regionServers.add(region.getSecond().toString());
    }
  }
  ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance());
  // Kick off the global procedure from the master coordinator to the region servers.
  // We rely on the existing Distributed Procedure framework to prevent any concurrent
  // procedure with the same name.
  Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), new byte[0],
      Lists.newArrayList(regionServers));
  monitor.rethrowException();
  if (proc == null) {
    String msg = "Failed to submit distributed procedure " + desc.getSignature() + " for '"
        + desc.getInstance() + "'. Another flush procedure is running?";
    LOG.error(msg);
    throw new IOException(msg);
  }
  procMap.put(tableName, proc);
  try {
    // Wait for the procedure to complete. A timer thread is kicked off that should cancel this
    // if it takes too long.
    proc.waitForCompleted();
    LOG.info("Done waiting - exec procedure " + desc.getSignature() + " for '"
        + desc.getInstance() + "'");
    LOG.info("Master flush table procedure is successful!");
  } catch (InterruptedException e) {
    ForeignException ee =
        new ForeignException("Interrupted while waiting for flush table procedure to finish", e);
    monitor.receive(ee);
    Thread.currentThread().interrupt();
  } catch (ForeignException e) {
    ForeignException ee =
        new ForeignException("Exception while waiting for flush table procedure to finish", e);
    monitor.receive(ee);
  }
  monitor.rethrowException();
}
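For context, this manager is reached from a client through Admin.execProcedure, with the table name carried as the procedure instance. A minimal, hedged sketch of that call follows; the "flush-table-proc" signature string, the table name, and the empty property map are assumptions for illustration, not taken from the snippet above.
// Hedged sketch: trigger the flush table procedure from a client.
// "flush-table-proc" is assumed to be the signature this manager registers under;
// "myTable" and conf are placeholders.
try (Connection connection = ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin()) {
  admin.execProcedure("flush-table-proc", "myTable", new HashMap<String, String>());
}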
use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
the class MergeTableRegionsProcedure method preMergeRegionsCommit.
/**
 * Pre merge regions commit action.
 * @param env MasterProcedureEnv
 */
private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOException {
  final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
  if (cpHost != null) {
    @MetaMutationAnnotation
    final List<Mutation> metaEntries = new ArrayList<>();
    boolean ret = cpHost.preMergeRegionsCommit(regionsToMerge, metaEntries, getUser());
    if (ret) {
      throw new IOException(
          "Coprocessor bypassing regions " + getRegionsToMergeListFullNameString() + " merge.");
    }
    try {
      for (Mutation p : metaEntries) {
        HRegionInfo.parseRegionName(p.getRow());
      }
    } catch (IOException e) {
      LOG.error("Row key of mutation from coprocessor is not parsable as region name. "
          + "Mutations from coprocessor should only be for hbase:meta table.", e);
      throw e;
    }
  }
}
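The validation loop above only checks that every mutation a coprocessor adds to metaEntries has a row key that parses as a region name. A small, hedged sketch of that same check in isolation; the table name "t1" is a hypothetical placeholder.
// Hedged sketch: a coprocessor-supplied hbase:meta mutation must use a region name as its row key.
HRegionInfo hri = new HRegionInfo(TableName.valueOf("t1"));    // hypothetical table
Mutation fromCoprocessor = new Put(hri.getRegionName());       // row key is the region name
HRegionInfo.parseRegionName(fromCoprocessor.getRow());         // throws IOException if malformed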
use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
the class TestMasterCoprocessorExceptionWithAbort method testExceptionFromCoprocessorWhenCreatingTable.
@Test(timeout = 30000)
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  BuggyMasterObserver cp =
      (BuggyMasterObserver) host.findCoprocessor(BuggyMasterObserver.class.getName());
  assertFalse("No table created yet", cp.wasCreateTableCalled());
  // Set a watch on the zookeeper /hbase/master node. If the master dies,
  // the node will be deleted.
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), "unittest",
      new Abortable() {
        @Override
        public void abort(String why, Throwable e) {
          throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
          return false;
        }
      });
  MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master",
      new Abortable() {
        @Override
        public void abort(String why, Throwable e) {
          throw new RuntimeException("Fatal ZK master tracker error, why=" + why, e);
        }

        @Override
        public boolean isAborted() {
          return false;
        }
      });
  masterTracker.start();
  zkw.registerListener(masterTracker);
  // Test (part of) the output that should have been printed by the master when it aborts
  // (namely the part that shows the set of loaded coprocessors).
  // In this test, there is only a single coprocessor (BuggyMasterObserver).
  assertTrue(HMaster.getLoadedCoprocessors()
      .contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName()));
  CreateTableThread createTableThread = new CreateTableThread(UTIL);
  // Attempting to create a table (using createTableThread above) triggers an NPE in
  // BuggyMasterObserver. The master then aborts and the /hbase/master zk node is deleted.
  createTableThread.start();
  // Wait up to 30 seconds for the master's /hbase/master zk node to go away after the master aborts.
  for (int i = 0; i < 30; i++) {
    if (masterTracker.masterZKNodeWasDeleted) {
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      fail("InterruptedException while waiting for master zk node to be deleted.");
    }
  }
  assertTrue("Master aborted on coprocessor exception, as expected.",
      masterTracker.masterZKNodeWasDeleted);
  createTableThread.interrupt();
  try {
    createTableThread.join(1000);
  } catch (InterruptedException e) {
    assertTrue("Ignoring InterruptedException while waiting for createTableThread.join().", true);
  }
}
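For findCoprocessor to return the BuggyMasterObserver, the observer has to be loaded into the MasterCoprocessorHost when the mini cluster starts. A hedged sketch of the kind of setup such a test typically uses; the @BeforeClass method name and cluster size are illustrative assumptions, while "hbase.coprocessor.master.classes" is the standard key for master coprocessors.
// Hedged sketch: register a master observer through configuration before starting the mini cluster.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  conf.set("hbase.coprocessor.master.classes", BuggyMasterObserver.class.getName());
  UTIL.startMiniCluster(1);
}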
use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
the class TestMasterObserver method testSnapshotOperations.
@Test(timeout = 180000)
public void testSnapshotOperations() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
  cp.resetStates();
  // create a table
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  Admin admin = UTIL.getAdmin();
  tableCreationLatch = new CountDownLatch(1);
  admin.createTable(htd);
  tableCreationLatch.await();
  tableCreationLatch = new CountDownLatch(1);
  admin.disableTable(tableName);
  assertTrue(admin.isTableDisabled(tableName));
  try {
    // Test snapshot operation
    assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled());
    admin.snapshot(TEST_SNAPSHOT, tableName);
    assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled());
    // Test list operation
    admin.listSnapshots();
    assertTrue("Coprocessor should have been called on snapshot list", cp.wasListSnapshotCalled());
    // Test clone operation
    admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
    assertTrue("Coprocessor should have been called on snapshot clone", cp.wasCloneSnapshotCalled());
    assertFalse("Coprocessor restore should not have been called on snapshot clone",
        cp.wasRestoreSnapshotCalled());
    admin.disableTable(TEST_CLONE);
    assertTrue(admin.isTableDisabled(TEST_CLONE));
    deleteTable(admin, TEST_CLONE);
    // Test restore operation
    cp.resetStates();
    admin.restoreSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot restore",
        cp.wasRestoreSnapshotCalled());
    assertFalse("Coprocessor clone should not have been called on snapshot restore",
        cp.wasCloneSnapshotCalled());
    admin.deleteSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot delete",
        cp.wasDeleteSnapshotCalled());
  } finally {
    deleteTable(admin, tableName);
  }
}
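CPMasterObserver works by flipping a flag in each hook it overrides so the test can assert which hooks fired. A heavily hedged sketch of that pattern follows; the exact preSnapshot parameter types differ between HBase versions, and the base class and method names here are assumptions for illustration, not the project's actual observer.
// Hedged sketch of the flag-tracking pattern used by test observers such as CPMasterObserver.
// The preSnapshot signature varies across HBase versions; treat this shape as illustrative only.
public static class SnapshotTrackingObserver extends BaseMasterObserver {
  private volatile boolean snapshotCalled = false;

  @Override
  public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
    snapshotCalled = true;     // record that the master invoked the pre-snapshot hook
  }

  public boolean wasSnapshotCalled() {
    return snapshotCalled;
  }

  public void resetStates() {
    snapshotCalled = false;
  }
}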
use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
the class TestAccessController2 method testCoprocessorLoading.
@Test(timeout = 180000)
public void testCoprocessorLoading() throws Exception {
  MasterCoprocessorHost cpHost =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  cpHost.load(MyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
  AccessController ACCESS_CONTROLLER =
      (AccessController) cpHost.findCoprocessor(MyAccessController.class.getName());
  MasterCoprocessorEnvironment CP_ENV = cpHost.createEnvironment(MyAccessController.class,
      ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  RegionServerCoprocessorHost rsHost =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
  RegionServerCoprocessorEnvironment RSCP_ENV = rsHost.createEnvironment(MyAccessController.class,
      ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
}
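The test loads the controller dynamically through cpHost.load; on a real cluster the AccessController is normally installed statically through configuration. A hedged sketch of that alternative, using the standard coprocessor configuration keys; the authorization flag and the choice to register on all three hosts reflect common practice rather than the snippet above.
// Hedged sketch: configuration-based installation of the AccessController on master,
// region, and region server hosts.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.security.authorization", "true");
conf.set("hbase.coprocessor.master.classes",
    "org.apache.hadoop.hbase.security.access.AccessController");
conf.set("hbase.coprocessor.region.classes",
    "org.apache.hadoop.hbase.security.access.AccessController");
conf.set("hbase.coprocessor.regionserver.classes",
    "org.apache.hadoop.hbase.security.access.AccessController");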