Use of org.apache.hadoop.hbase.Coprocessor in project hbase by Apache.
The class CoprocessorHost, method loadInstance.
/**
 * @param implClass Implementation class
 * @param priority chaining priority
 * @param conf configuration
 * @return the loaded coprocessor environment
 * @throws java.io.IOException if the class does not implement Coprocessor or cannot be instantiated
 */
public E loadInstance(Class<?> implClass, int priority, Configuration conf) throws IOException {
  if (!Coprocessor.class.isAssignableFrom(implClass)) {
    throw new IOException("Configured class " + implClass.getName() + " must implement "
        + Coprocessor.class.getName() + " interface ");
  }
  // create the instance
  Coprocessor impl;
  Object o = null;
  try {
    o = implClass.newInstance();
    impl = (Coprocessor) o;
  } catch (InstantiationException e) {
    throw new IOException(e);
  } catch (IllegalAccessException e) {
    throw new IOException(e);
  }
  // create the environment
  E env = createEnvironment(implClass, impl, priority, loadSequence.incrementAndGet(), conf);
  if (env instanceof Environment) {
    ((Environment) env).startup();
  }
  // HBASE-4014: maintain list of loaded coprocessors for later crash analysis
  // if server (master or regionserver) aborts.
  coprocessorNames.add(implClass.getName());
  return env;
}
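For context, a minimal sketch of a call site, assuming a concrete CoprocessorHost subclass held in a host variable; the configuration key is hypothetical, while loadInstance and Coprocessor.PRIORITY_USER come from the snippet above:

// Hypothetical call site for loadInstance. The configuration key and the
// "host" variable are illustrative; loadInstance and PRIORITY_USER are real.
String className = conf.get("hbase.coprocessors.demo.class"); // assumed key
Class<?> implClass = Class.forName(className, true, getClass().getClassLoader());
host.loadInstance(implClass, Coprocessor.PRIORITY_USER, conf);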
Use of org.apache.hadoop.hbase.Coprocessor in project hbase by Apache.
The class RegionCoprocessorHost, method execOperation.
private boolean execOperation(final boolean earlyExit, final CoprocessorOperation ctx) throws IOException {
  boolean bypass = false;
  List<RegionEnvironment> envs = coprocessors.get();
  for (int i = 0; i < envs.size(); i++) {
    RegionEnvironment env = envs.get(i);
    Coprocessor observer = env.getInstance();
    if (ctx.hasCall(observer)) {
      ctx.prepare(env);
      Thread currentThread = Thread.currentThread();
      ClassLoader cl = currentThread.getContextClassLoader();
      try {
        currentThread.setContextClassLoader(env.getClassLoader());
        ctx.call(observer, ctx);
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      } finally {
        currentThread.setContextClassLoader(cl);
      }
      bypass |= ctx.shouldBypass();
      if (earlyExit && ctx.shouldComplete()) {
        break;
      }
    }
    ctx.postEnvCall(env);
  }
  return bypass;
}
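The per-observer classloader swap above is the load-bearing detail: each coprocessor runs with its own classloader installed as the thread's context classloader, and the original is always restored, even when the call throws. Bypass flags from all observers are OR-ed together, and with earlyExit set the chain stops as soon as an observer marks the operation complete. The same swap idiom in isolation, as a self-contained helper (the method name is illustrative):

// The context-classloader swap idiom used above, in isolation: run a task
// under a coprocessor's own classloader, always restoring the original.
static void runWithClassLoader(ClassLoader cl, Runnable task) {
  Thread t = Thread.currentThread();
  ClassLoader prev = t.getContextClassLoader();
  try {
    t.setContextClassLoader(cl);
    task.run();
  } finally {
    t.setContextClassLoader(prev);
  }
}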
Use of org.apache.hadoop.hbase.Coprocessor in project hbase by Apache.
The class TestWALObserver, method getCoprocessor.
private SampleRegionWALCoprocessor getCoprocessor(WAL wal,
    Class<? extends SampleRegionWALCoprocessor> clazz) throws Exception {
  WALCoprocessorHost host = wal.getCoprocessorHost();
  Coprocessor c = host.findCoprocessor(clazz.getName());
  return (SampleRegionWALCoprocessor) c;
}
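Typical usage inside a test body would look something like the following; the wal variable is assumed to come from the test's setup:

// Assumed usage; the coprocessor must have been registered on the WAL beforehand.
SampleRegionWALCoprocessor cp = getCoprocessor(wal, SampleRegionWALCoprocessor.class);
assertNotNull(cp);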
Use of org.apache.hadoop.hbase.Coprocessor in project hbase by Apache.
The class TestCoprocessorInterface, method testSharedData.
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] families = { fam1, fam2, fam3 };
  Configuration hc = initConfig();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[] {}, families);
  for (int i = 0; i < 3; i++) {
    HTestConst.addContent(region, fam3);
    region.flush(true);
  }
  region.compact(false);
  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
  Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  Object o = ((CoprocessorImpl) c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII) c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // two coprocessors get different sharedDatas
  assertFalse(((CoprocessorImpl) c).getSharedData() == ((CoprocessorII) c2).getSharedData());
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // make sure that all coprocessors of a class have identical sharedDatas
  assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
  assertTrue(((CoprocessorII) c2).getSharedData().get("test2") == o2);
  // now have all Environments fail
  try {
    byte[] r = region.getRegionInfo().getStartKey();
    if (r == null || r.length <= 0) {
      // It's the start row. Can't ask for null. Ask for minimal key instead.
      r = new byte[] { 0 };
    }
    Get g = new Get(r);
    region.get(g);
    fail();
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
    // expected: the failing coprocessor is removed from the host
  }
  assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  // CoprocessorImpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // a new map and object were created, so the reference differs: the old
  // entry was indeed removed by the GC and a new one has been created
  Object o3 = ((CoprocessorII) c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
  HBaseTestingUtil.closeRegionAndWAL(region);
}
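The contract this test exercises is that the host hands every instance of a given coprocessor class the same shared ConcurrentMap, while different classes get different maps. A sketch of how a test coprocessor might capture that map at startup (hedged: the field, accessor, and "test1" key mirror the test above, and the environment cast is an assumption about the test's region-level setup):

// Sketch: capture the per-class shared map when the coprocessor starts.
private ConcurrentMap<String, Object> sharedData;

@Override
public void start(CoprocessorEnvironment e) throws IOException {
  sharedData = ((RegionCoprocessorEnvironment) e).getSharedData();
  sharedData.putIfAbsent("test1", new Object()); // key matches the test above
}

ConcurrentMap<String, Object> getSharedData() {
  return sharedData;
}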
Use of org.apache.hadoop.hbase.Coprocessor in project hbase by Apache.
The class TestRegionObserverInterface, method testCompactionOverride.
/**
 * Tests overriding compaction handling via coprocessor hooks.
 */
@Test
public void testCompactionOverride() throws Exception {
  final TableName compactTable = TableName.valueOf(name.getMethodName());
  Admin admin = util.getAdmin();
  if (admin.tableExists(compactTable)) {
    admin.disableTable(compactTable);
    admin.deleteTable(compactTable);
  }
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(compactTable)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(A))
      .setCoprocessor(EvenOnlyCompactor.class.getName()).build();
  admin.createTable(tableDescriptor);
  Table table = util.getConnection().getTable(compactTable);
  for (long i = 1; i <= 10; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put put = new Put(iBytes);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(A, A, iBytes);
    table.put(put);
  }
  HRegion firstRegion = cluster.getRegions(compactTable).get(0);
  Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class);
  assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
  EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
  // force a flush, then a compaction
  long ts = EnvironmentEdgeManager.currentTime();
  admin.flush(compactTable);
  // wait for flush
  for (int i = 0; i < 10; i++) {
    if (compactor.lastFlush >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
  LOG.debug("Flush complete");
  ts = compactor.lastFlush;
  admin.majorCompact(compactTable);
  // wait for compaction
  for (int i = 0; i < 30; i++) {
    if (compactor.lastCompaction >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  LOG.debug("Last compaction was at " + compactor.lastCompaction);
  assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
  // only even rows should remain
  ResultScanner scanner = table.getScanner(new Scan());
  try {
    for (long i = 2; i <= 10; i += 2) {
      Result r = scanner.next();
      assertNotNull(r);
      assertFalse(r.isEmpty());
      byte[] iBytes = Bytes.toBytes(i);
      // JUnit's assertArrayEquals takes the expected array before the actual one
      assertArrayEquals("Row should be " + i, iBytes, r.getRow());
      assertArrayEquals("Value should be " + i, iBytes, r.getValue(A, A));
    }
  } finally {
    scanner.close();
  }
  table.close();
}
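The two wait loops above are instances of the same poll-until pattern; a generic, self-contained form (an illustrative helper, not part of the HBase test):

// Illustrative helper: poll a condition with a fixed sleep between attempts,
// returning whether the condition held before the attempts ran out.
static boolean waitFor(java.util.function.BooleanSupplier cond, int attempts, long sleepMs)
    throws InterruptedException {
  for (int i = 0; i < attempts; i++) {
    if (cond.getAsBoolean()) {
      return true;
    }
    Thread.sleep(sleepMs);
  }
  return cond.getAsBoolean();
}

With it, the flush wait would collapse to assertTrue("Flush didn't complete", waitFor(() -> compactor.lastFlush >= ts, 10, 1000)), though ts would need to be effectively final for the lambda to capture it.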