Example 6 with Coprocessor

Use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

From class TestRegionObserverInterface, method testCompactionOverride.

/**
   * Tests overriding compaction handling via coprocessor hooks
   * @throws Exception
   */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
    final TableName compactTable = TableName.valueOf(name.getMethodName());
    Admin admin = util.getAdmin();
    if (admin.tableExists(compactTable)) {
        admin.disableTable(compactTable);
        admin.deleteTable(compactTable);
    }
    HTableDescriptor htd = new HTableDescriptor(compactTable);
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);
    Table table = util.getConnection().getTable(compactTable);
    for (long i = 1; i <= 10; i++) {
        byte[] iBytes = Bytes.toBytes(i);
        Put put = new Put(iBytes);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(A, A, iBytes);
        table.put(put);
    }
    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
    // record a timestamp and flush; a major compaction is forced below
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    // wait for flush
    for (int i = 0; i < 10; i++) {
        if (compactor.lastFlush >= ts) {
            break;
        }
        Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");
    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    // wait for compaction
    for (int i = 0; i < 30; i++) {
        if (compactor.lastCompaction >= ts) {
            break;
        }
        Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
    // only even rows should remain
    ResultScanner scanner = table.getScanner(new Scan());
    try {
        for (long i = 2; i <= 10; i += 2) {
            Result r = scanner.next();
            assertNotNull(r);
            assertFalse(r.isEmpty());
            byte[] iBytes = Bytes.toBytes(i);
            assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
            assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
        }
    } finally {
        scanner.close();
    }
    table.close();
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Admin (org.apache.hadoop.hbase.client.Admin), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Coprocessor (org.apache.hadoop.hbase.Coprocessor), Scan (org.apache.hadoop.hbase.client.Scan), Test (org.junit.Test)
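
The EvenOnlyCompactor observer referenced above is not shown in this example. Below is a minimal sketch of what such an observer might look like, written against the HBase 1.x-era BaseRegionObserver/InternalScanner API and presented as a standalone class; the field names lastFlush and lastCompaction follow how the test reads them, but everything else is an assumption rather than the class from the HBase source tree.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch, not the actual nested class used by TestRegionObserverInterface.
public class EvenOnlyCompactor extends BaseRegionObserver {

    volatile long lastCompaction;
    volatile long lastFlush;

    @Override
    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
            Store store, final InternalScanner scanner, ScanType scanType) {
        // Replace the compaction scanner with a wrapper that drops rows whose
        // long-encoded key is odd, so only even rows survive the rewrite.
        return new InternalScanner() {
            @Override
            public boolean next(List<Cell> results) throws IOException {
                List<Cell> internal = new ArrayList<>();
                boolean hasMore = scanner.next(internal);
                copyEvenRows(internal, results);
                return hasMore;
            }

            @Override
            public boolean next(List<Cell> results, ScannerContext context) throws IOException {
                List<Cell> internal = new ArrayList<>();
                boolean hasMore = scanner.next(internal, context);
                copyEvenRows(internal, results);
                return hasMore;
            }

            private void copyEvenRows(List<Cell> from, List<Cell> to) {
                for (Cell cell : from) {
                    long row = Bytes.toLong(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                    if (row % 2 == 0) {
                        to.add(cell);
                    }
                }
            }

            @Override
            public void close() throws IOException {
                scanner.close();
            }
        };
    }

    @Override
    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
            Store store, StoreFile resultFile) {
        // The test polls this field to detect that the major compaction finished.
        lastCompaction = System.currentTimeMillis();
    }

    @Override
    public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) {
        // The test polls this field to detect that the flush finished.
        lastFlush = System.currentTimeMillis();
    }
}

The key point is that preCompact may substitute its own scanner for the one the compaction would otherwise read from, so anything the wrapper filters out never reaches the rewritten store files; postFlush and postCompact only record timestamps for the test to poll.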

Example 7 with Coprocessor

Use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

From class TestWALFactory, method testWALCoprocessorLoaded.

/**
   * A loaded WAL coprocessor won't break existing WAL test cases.
   */
@Test
public void testWALCoprocessorLoaded() throws Exception {
    // test to see whether the coprocessor is loaded or not.
    WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION, null).getCoprocessorHost();
    Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
    assertNotNull(c);
}
Also used: WALCoprocessorHost (org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost), SampleRegionWALObserver (org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver), Coprocessor (org.apache.hadoop.hbase.Coprocessor), Test (org.junit.Test)
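
Unlike region observers, WAL coprocessors are not attached through a table descriptor; WALCoprocessorHost loads them from configuration when the WAL is created. A minimal sketch of the configuration step such a test relies on, with the surrounding WALFactory setup omitted:

Configuration conf = HBaseConfiguration.create();
// WALCoprocessorHost reads this key (it resolves to "hbase.coprocessor.wal.classes")
// when the WAL is instantiated, so the observer class must be configured up front.
conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, SampleRegionWALObserver.class.getName());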

Example 8 with Coprocessor

Use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

From class TestRegionObserverInterface, method verifyMethodResult.

// Check, on each online region of the table, whether the coprocessor upcalls were invoked and returned the expected values.
private void verifyMethodResult(Class<?> c, String[] methodName, TableName tableName, Object[] value) throws IOException {
    try {
        for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
            if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
                continue;
            }
            for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
                if (!r.getTable().equals(tableName)) {
                    continue;
                }
                RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();
                Coprocessor cp = cph.findCoprocessor(c.getName());
                assertNotNull(cp);
                for (int i = 0; i < methodName.length; ++i) {
                    Method m = c.getMethod(methodName[i]);
                    Object o = m.invoke(cp);
                    assertTrue("Result of " + c.getName() + "." + methodName[i] + " is expected to be " + value[i].toString() + ", while we get " + o.toString(), o.equals(value[i]));
                }
            }
        }
    } catch (Exception e) {
        throw new IOException(e); // keep the original exception as the cause
    }
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Coprocessor (org.apache.hadoop.hbase.Coprocessor), JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil), RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost), Method (java.lang.reflect.Method), IOException (java.io.IOException)
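
As an illustration of how such a helper is typically invoked, a call could look like the snippet below; the observer class and the boolean getter names (SimpleRegionObserver, hadPrePut, hadPostPut) are placeholders chosen for the example, not values taken from the listing above.

// Hypothetical invocation: after issuing a Put against tableName, assert that the
// pre-put and post-put upcalls fired on every online region of that table.
verifyMethodResult(SimpleRegionObserver.class,
    new String[] { "hadPrePut", "hadPostPut" },
    tableName,
    new Object[] { Boolean.TRUE, Boolean.TRUE });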

Example 9 with Coprocessor

Use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

From class TestRegionObserverStacking, method testRegionObserverStacking.

public void testRegionObserverStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };
    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
    h.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    region.put(put);
    Coprocessor c = h.findCoprocessor(ObserverA.class.getName());
    long idA = ((ObserverA) c).id;
    c = h.findCoprocessor(ObserverB.class.getName());
    long idB = ((ObserverB) c).id;
    c = h.findCoprocessor(ObserverC.class.getName());
    long idC = ((ObserverC) c).id;
    assertTrue(idA < idB);
    assertTrue(idB < idC);
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used: HRegion (org.apache.hadoop.hbase.regionserver.HRegion), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), Configuration (org.apache.hadoop.conf.Configuration), Coprocessor (org.apache.hadoop.hbase.Coprocessor), RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost), Put (org.apache.hadoop.hbase.client.Put)
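
ObserverA, ObserverB and ObserverC are not shown here. Below is a minimal sketch of one of them, assuming the HBase 1.x-era BaseRegionObserver API and presented as a standalone class: each observer stamps an id the moment its postPut hook runs, so the assertions above simply check that the higher-priority observer was invoked first.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

// Hypothetical sketch; ObserverB and ObserverC would be identical apart from the class name.
public class ObserverA extends BaseRegionObserver {

    long id;

    @Override
    public void postPut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
            WALEdit edit, Durability durability) throws IOException {
        // Higher-priority observers run earlier in the chain and therefore record smaller ids.
        id = System.currentTimeMillis();
        try {
            // Sleep briefly so the next observer in the chain sees a strictly larger timestamp.
            Thread.sleep(10);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
    }
}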

Example 10 with Coprocessor

Use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

From class TestWALObserver, method getCoprocessor.

private SampleRegionWALObserver getCoprocessor(WAL wal, Class<? extends SampleRegionWALObserver> clazz) throws Exception {
    WALCoprocessorHost host = wal.getCoprocessorHost();
    Coprocessor c = host.findCoprocessor(clazz.getName());
    return (SampleRegionWALObserver) c;
}
Also used: WALCoprocessorHost (org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost), Coprocessor (org.apache.hadoop.hbase.Coprocessor)
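
A brief usage sketch for this helper; the wal handle and the assertion below are illustrative rather than lines taken from the test.

// Hypothetical usage: fetch the observer that was loaded onto this WAL and inspect it directly.
SampleRegionWALObserver observer = getCoprocessor(wal, SampleRegionWALObserver.class);
assertNotNull("SampleRegionWALObserver should be loaded on this WAL", observer);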

Aggregations

Coprocessor (org.apache.hadoop.hbase.Coprocessor): 16 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 7 usages
Test (org.junit.Test): 7 usages
TableName (org.apache.hadoop.hbase.TableName): 5 usages
IOException (java.io.IOException): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 4 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 3 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 3 usages
RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost): 3 usages
Method (java.lang.reflect.Method): 2 usages
CoprocessorEnvironment (org.apache.hadoop.hbase.CoprocessorEnvironment): 2 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 2 usages
Put (org.apache.hadoop.hbase.client.Put): 2 usages
Scan (org.apache.hadoop.hbase.client.Scan): 2 usages
Table (org.apache.hadoop.hbase.client.Table): 2 usages
SampleRegionWALObserver (org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver): 2 usages
WALCoprocessorHost (org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost): 2 usages