Example usage of org.apache.hadoop.hbase.Coprocessor in the Apache HBase project: class TestRegionObserverInterface, method testCompactionOverride.
/**
 * Tests overriding compaction handling via coprocessor hooks: after a major
 * compaction runs with {@code EvenOnlyCompactor} installed on the table, only
 * rows with even keys should remain.
 * @throws Exception if table setup, flush, compaction, or scanning fails
 */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
  final TableName compactTable = TableName.valueOf(name.getMethodName());
  Admin admin = util.getAdmin();
  // Drop any leftover table from a previous run so the test starts clean.
  if (admin.tableExists(compactTable)) {
    admin.disableTable(compactTable);
    admin.deleteTable(compactTable);
  }
  HTableDescriptor htd = new HTableDescriptor(compactTable);
  htd.addFamily(new HColumnDescriptor(A));
  htd.addCoprocessor(EvenOnlyCompactor.class.getName());
  admin.createTable(htd);
  // try-with-resources guarantees the table is closed even if an assertion fails
  try (Table table = util.getConnection().getTable(compactTable)) {
    // write rows keyed 1..10; the compactor should later drop the odd ones
    for (long i = 1; i <= 10; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put put = new Put(iBytes);
      put.setDurability(Durability.SKIP_WAL); // throwaway test data; WAL not needed
      put.addColumn(A, A, iBytes);
      table.put(put);
    }
    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp =
        firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
    // force a flush and poll (up to ~10s) until the coprocessor observes it
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    for (int i = 0; i < 10 && compactor.lastFlush < ts; i++) {
      Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");
    // force a major compaction and poll (up to ~30s) until it is observed
    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    for (int i = 0; i < 30 && compactor.lastCompaction < ts; i++) {
      Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
    // only even rows should remain after the coprocessor-filtered compaction
    try (ResultScanner scanner = table.getScanner(new Scan())) {
      for (long i = 2; i <= 10; i += 2) {
        Result r = scanner.next();
        assertNotNull(r);
        assertFalse(r.isEmpty());
        byte[] iBytes = Bytes.toBytes(i);
        // assertArrayEquals(message, expected, actual): expected value goes first
        assertArrayEquals("Row should be " + i, iBytes, r.getRow());
        assertArrayEquals("Value should be " + i, iBytes, r.getValue(A, A));
      }
    }
  }
}
Example usage of org.apache.hadoop.hbase.Coprocessor in the Apache HBase project: class TestWALFactory, method testWALCoprocessorLoaded.
/**
 * A loaded WAL coprocessor won't break existing WAL test cases.
 */
@Test
public void testWALCoprocessorLoaded() throws Exception {
  // Fetch the WAL's coprocessor host and confirm the sample observer was registered on it.
  final WALCoprocessorHost coprocessorHost =
      wals.getWAL(UNSPECIFIED_REGION, null).getCoprocessorHost();
  final Coprocessor observer =
      coprocessorHost.findCoprocessor(SampleRegionWALObserver.class.getName());
  assertNotNull(observer);
}
Example usage of org.apache.hadoop.hbase.Coprocessor in the Apache HBase project: class TestRegionObserverInterface, method verifyMethodResult.
/**
 * Checks on each live region server whether the coprocessor upcalls were made:
 * for every online region of {@code tableName}, reflectively invokes each
 * zero-argument method named in {@code methodName} on the loaded coprocessor
 * instance of class {@code c} and asserts it returns the matching entry of
 * {@code value}. The two arrays are expected to be parallel and equal-length.
 * @throws IOException wrapping any reflection or RPC failure
 */
private void verifyMethodResult(Class<?> c, String[] methodName, TableName tableName, Object[] value) throws IOException {
  try {
    for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
      // skip servers that are dead or shutting down
      if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
        continue;
      }
      for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
        if (!r.getTable().equals(tableName)) {
          continue;
        }
        RegionCoprocessorHost cph =
            t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();
        Coprocessor cp = cph.findCoprocessor(c.getName());
        assertNotNull(cp);
        for (int i = 0; i < methodName.length; ++i) {
          Method m = c.getMethod(methodName[i]);
          Object o = m.invoke(cp);
          assertTrue("Result of " + c.getName() + "." + methodName[i]
              + " is expected to be " + value[i].toString()
              + ", while we get " + o.toString(), o.equals(value[i]));
        }
      }
    }
  } catch (Exception e) {
    // wrap with the original as cause; e.toString() alone would lose the stack trace
    throw new IOException(e);
  }
}
Example usage of org.apache.hadoop.hbase.Coprocessor in the Apache HBase project: class TestRegionObserverStacking, method testRegionObserverStacking.
/**
 * Verifies that stacked region observers fire in priority order on a put:
 * the PRIORITY_HIGHEST observer runs before PRIORITY_USER, which runs before
 * PRIORITY_LOWEST, as recorded in each observer's {@code id} field.
 */
public void testRegionObserverStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  try {
    RegionCoprocessorHost h = region.getCoprocessorHost();
    // load three observers at three distinct priorities
    h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
    h.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    region.put(put);
    // each observer records an ordering id when it sees the put
    Coprocessor c = h.findCoprocessor(ObserverA.class.getName());
    long idA = ((ObserverA) c).id;
    c = h.findCoprocessor(ObserverB.class.getName());
    long idB = ((ObserverB) c).id;
    c = h.findCoprocessor(ObserverC.class.getName());
    long idC = ((ObserverC) c).id;
    // higher-priority observers must have run first
    assertTrue(idA < idB);
    assertTrue(idB < idC);
  } finally {
    // release the region and its WAL even if an assertion above fails
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
Example usage of org.apache.hadoop.hbase.Coprocessor in the Apache HBase project: class TestWALObserver, method getCoprocessor.
/**
 * Looks up the observer of the given class on the WAL's coprocessor host
 * and returns it, or {@code null} if no such coprocessor is loaded.
 */
private SampleRegionWALObserver getCoprocessor(WAL wal, Class<? extends SampleRegionWALObserver> clazz) throws Exception {
  final WALCoprocessorHost coprocessorHost = wal.getCoprocessorHost();
  return (SampleRegionWALObserver) coprocessorHost.findCoprocessor(clazz.getName());
}
End of aggregated usage examples.