Search in sources :

Example 1 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

The loadInstance method of the CoprocessorHost class:

/**
   * Loads and starts a coprocessor instance of the given implementation class.
   * @param implClass Implementation class; must implement {@link Coprocessor}
   * @param priority priority assigned to the coprocessor environment
   * @param conf configuration passed to the new environment
   * @return the started environment wrapping the new coprocessor instance
   * @throws java.io.IOException if the class does not implement Coprocessor
   *         or cannot be instantiated reflectively
   */
public E loadInstance(Class<?> implClass, int priority, Configuration conf) throws IOException {
    if (!Coprocessor.class.isAssignableFrom(implClass)) {
        throw new IOException("Configured class " + implClass.getName() + " must implement " + Coprocessor.class.getName() + " interface ");
    }
    // create the instance; Class.newInstance() is deprecated (Java 9+) and
    // silently propagates undeclared checked exceptions from the constructor,
    // so instantiate through the declared no-arg constructor instead.
    Coprocessor impl;
    try {
        impl = (Coprocessor) implClass.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        // covers NoSuchMethodException, InstantiationException,
        // IllegalAccessException and InvocationTargetException
        throw new IOException(e);
    }
    // create the environment
    E env = createEnvironment(implClass, impl, priority, loadSequence.incrementAndGet(), conf);
    if (env instanceof Environment) {
        ((Environment) env).startup();
    }
    // HBASE-4014: maintain list of loaded coprocessors for later crash analysis
    // if server (master or regionserver) aborts.
    coprocessorNames.add(implClass.getName());
    return env;
}
Also used : Coprocessor(org.apache.hadoop.hbase.Coprocessor) CoprocessorEnvironment(org.apache.hadoop.hbase.CoprocessorEnvironment) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException)

Example 2 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

The execOperation method of the RegionCoprocessorHost class:

/**
 * Runs the given operation against every loaded coprocessor that declares
 * the corresponding hook, restoring the thread context class loader after
 * each call. Returns whether any coprocessor requested a bypass.
 */
private boolean execOperation(final boolean earlyExit, final CoprocessorOperation ctx) throws IOException {
    boolean bypass = false;
    for (RegionEnvironment environment : coprocessors.get()) {
        Coprocessor instance = environment.getInstance();
        if (ctx.hasCall(instance)) {
            ctx.prepare(environment);
            Thread thread = Thread.currentThread();
            ClassLoader previousLoader = thread.getContextClassLoader();
            try {
                // run the hook under the coprocessor's own class loader
                thread.setContextClassLoader(environment.getClassLoader());
                ctx.call(instance, ctx);
            } catch (Throwable t) {
                handleCoprocessorThrowable(environment, t);
            } finally {
                thread.setContextClassLoader(previousLoader);
            }
            // |= (not ||) so shouldBypass() is consulted on every iteration
            bypass |= ctx.shouldBypass();
            if (earlyExit && ctx.shouldComplete()) {
                // NOTE: breaking here skips postEnvCall for this environment,
                // matching the established behavior of this host.
                break;
            }
        }
        ctx.postEnvCall(environment);
    }
    return bypass;
}
Also used : MetricsCoprocessor(org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor) Coprocessor(org.apache.hadoop.hbase.Coprocessor) CoprocessorClassLoader(org.apache.hadoop.hbase.util.CoprocessorClassLoader)

Example 3 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

The getCoprocessor method of the TestWALObserver class:

/** Looks up the named coprocessor on the WAL's host and casts it to the sample type. */
private SampleRegionWALCoprocessor getCoprocessor(WAL wal, Class<? extends SampleRegionWALCoprocessor> clazz) throws Exception {
    return (SampleRegionWALCoprocessor) wal.getCoprocessorHost().findCoprocessor(clazz.getName());
}
Also used : WALCoprocessorHost(org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost) Coprocessor(org.apache.hadoop.hbase.Coprocessor)

Example 4 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

The testSharedData method of the TestCoprocessorInterface class:

/**
 * Verifies the lifecycle of per-class coprocessor shared-data maps:
 * distinct classes get distinct maps, instances of the same class share one
 * map, and a map survives until its owning class is unloaded and collected.
 */
@Test
public void testSharedData() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] families = { fam1, fam2, fam3 };
    Configuration hc = initConfig();
    // start with a region that has NO coprocessors loaded
    HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[] {}, families);
    for (int i = 0; i < 3; i++) {
        HTestConst.addContent(region, fam3);
        region.flush(true);
    }
    region.compact(false);
    // reopen with the two coprocessor classes under test
    region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
    Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
    Object o = ((CoprocessorImpl) c).getSharedData().get("test1");
    Object o2 = ((CoprocessorII) c2).getSharedData().get("test2");
    assertNotNull(o);
    assertNotNull(o2);
    // two coprocessors of different classes get different sharedData maps
    assertFalse(((CoprocessorImpl) c).getSharedData() == ((CoprocessorII) c2).getSharedData());
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
    // make sure that all coprocessor of a class have identical sharedDatas
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII) c2).getSharedData().get("test2") == o2);
    // now have all Environments fail
    try {
        byte[] r = region.getRegionInfo().getStartKey();
        if (r == null || r.length <= 0) {
            // Its the start row.  Can't ask for null.  Ask for minimal key instead.
            r = new byte[] { 0 };
        }
        Get g = new Get(r);
        region.get(g);
        fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
        // expected: the failing coprocessor aborts the get and is removed
    }
    // CoprocessorII should have been unloaded by the failure above
    assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    // drop our strong references so the shared-data map becomes collectable
    c = c2 = null;
    // perform a GC
    System.gc();
    // reopen the region
    region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    // CPimpl is unaffected, still the same reference
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
    // new map and object created, hence the reference is different
    // hence the old entry was indeed removed by the GC and new one has been created
    Object o3 = ((CoprocessorII) c2).getSharedData().get("test2");
    assertFalse(o3 == o2);
    HBaseTestingUtil.closeRegionAndWAL(region);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) Get(org.apache.hadoop.hbase.client.Get) Test(org.junit.Test)

Example 5 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

The testCompactionOverride method of the TestRegionObserverInterface class:

/**
 * Tests overriding compaction handling via coprocessor hooks: loads a
 * coprocessor that drops odd-keyed rows during compaction, forces a flush
 * and a major compaction, then verifies only even rows remain.
 * @throws Exception if table setup, flush/compaction, or scanning fails
 */
@Test
public void testCompactionOverride() throws Exception {
    final TableName compactTable = TableName.valueOf(name.getMethodName());
    Admin admin = util.getAdmin();
    if (admin.tableExists(compactTable)) {
        admin.disableTable(compactTable);
        admin.deleteTable(compactTable);
    }
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(compactTable).setColumnFamily(ColumnFamilyDescriptorBuilder.of(A)).setCoprocessor(EvenOnlyCompactor.class.getName()).build();
    admin.createTable(tableDescriptor);
    // try-with-resources so the table is closed even when an assertion fails
    // (the previous version leaked it on any early test failure)
    try (Table table = util.getConnection().getTable(compactTable)) {
        for (long i = 1; i <= 10; i++) {
            byte[] iBytes = Bytes.toBytes(i);
            Put put = new Put(iBytes);
            put.setDurability(Durability.SKIP_WAL);
            put.addColumn(A, A, iBytes);
            table.put(put);
        }
        HRegion firstRegion = cluster.getRegions(compactTable).get(0);
        Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class);
        assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
        EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
        // force a compaction
        long ts = EnvironmentEdgeManager.currentTime();
        admin.flush(compactTable);
        // wait for flush (poll up to ~10s)
        for (int i = 0; i < 10; i++) {
            if (compactor.lastFlush >= ts) {
                break;
            }
            Thread.sleep(1000);
        }
        assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
        LOG.debug("Flush complete");
        ts = compactor.lastFlush;
        admin.majorCompact(compactTable);
        // wait for compaction (poll up to ~30s)
        for (int i = 0; i < 30; i++) {
            if (compactor.lastCompaction >= ts) {
                break;
            }
            Thread.sleep(1000);
        }
        LOG.debug("Last compaction was at " + compactor.lastCompaction);
        assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
        // only even rows should remain
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            for (long i = 2; i <= 10; i += 2) {
                Result r = scanner.next();
                assertNotNull(r);
                assertFalse(r.isEmpty());
                byte[] iBytes = Bytes.toBytes(i);
                assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
                assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
            }
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)

Aggregations

Coprocessor (org.apache.hadoop.hbase.Coprocessor)17 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)9 Test (org.junit.Test)8 IOException (java.io.IOException)4 TableName (org.apache.hadoop.hbase.TableName)4 Configuration (org.apache.hadoop.conf.Configuration)3 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)3 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)3 RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost)3 WALCoprocessorHost (org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost)3 Method (java.lang.reflect.Method)2 SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster)2 Admin (org.apache.hadoop.hbase.client.Admin)2 Put (org.apache.hadoop.hbase.client.Put)2 Scan (org.apache.hadoop.hbase.client.Scan)2 Table (org.apache.hadoop.hbase.client.Table)2 TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder)2 RegionCoprocessor (org.apache.hadoop.hbase.coprocessor.RegionCoprocessor)2 SampleRegionWALCoprocessor (org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor)2 TableId (co.cask.cdap.data2.util.TableId)1