
Example 11 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

the class TestRegionObserverInterface method verifyMethodResult.

// For each online region of the table, verify that the coprocessor is loaded and that each named accessor returns the expected value.
private void verifyMethodResult(Class<?> c, String[] methodName, TableName tableName, Object[] value) throws IOException {
    try {
        for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
            if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
                continue;
            }
            for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
                if (!r.getTable().equals(tableName)) {
                    continue;
                }
                RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();
                Coprocessor cp = cph.findCoprocessor(c.getName());
                assertNotNull(cp);
                for (int i = 0; i < methodName.length; ++i) {
                    Method m = c.getMethod(methodName[i]);
                    Object o = m.invoke(cp);
                    assertTrue("Result of " + c.getName() + "." + methodName[i] + " is expected to be " + value[i].toString() + ", while we get " + o.toString(), o.equals(value[i]));
                }
            }
        }
    } catch (Exception e) {
        // Wrap and rethrow, keeping the original exception as the cause.
        throw new IOException(e);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Coprocessor(org.apache.hadoop.hbase.Coprocessor) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) RegionCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost) Method(java.lang.reflect.Method) IOException(java.io.IOException)
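
The helper is reflection driven: each name in methodName is invoked on the coprocessor instance found for the table's regions, and the result is compared against the matching entry in value. A minimal usage sketch, assuming a hypothetical observer class CountingRegionObserver with a no-argument accessor getCtPrePut (both names are illustrative and not taken from the example above):

// Hypothetical usage: assert that the observer recorded exactly one prePut
// upcall for table "testtable". CountingRegionObserver and getCtPrePut are
// assumed names used only for illustration.
TableName tableName = TableName.valueOf("testtable");
verifyMethodResult(CountingRegionObserver.class,
    new String[] { "getCtPrePut" },
    tableName,
    new Object[] { 1 });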

Example 12 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

the class TestRegionObserverStacking method testRegionObserverStacking.

public void testRegionObserverStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] { A };
    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
    h.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    region.put(put);
    Coprocessor c = h.findCoprocessor(ObserverA.class.getName());
    long idA = ((ObserverA) c).id;
    c = h.findCoprocessor(ObserverB.class.getName());
    long idB = ((ObserverB) c).id;
    c = h.findCoprocessor(ObserverC.class.getName());
    long idC = ((ObserverC) c).id;
    assertTrue(idA < idB);
    assertTrue(idB < idC);
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) Coprocessor(org.apache.hadoop.hbase.Coprocessor) RegionCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost) Put(org.apache.hadoop.hbase.client.Put)
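
The test wires the three observers in programmatically through RegionCoprocessorHost.load(...) so it can control their priorities, then uses the id fields the observers record to check that their callbacks ran in priority order, with ObserverA (loaded at PRIORITY_HIGHEST) first. Outside of a unit test, region observers are usually loaded declaratively. A minimal sketch of that alternative, assuming ObserverA, ObserverB and ObserverC are on the region server classpath; hbase.coprocessor.region.classes is the standard property for system region coprocessors, and classes listed there are loaded in the order given:

// Sketch only: the same observers loaded declaratively through configuration
// instead of RegionCoprocessorHost.load(...). Classes listed in this property
// are picked up for every region opened with this configuration.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.coprocessor.region.classes",
    ObserverA.class.getName() + ","
        + ObserverB.class.getName() + ","
        + ObserverC.class.getName());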

Example 13 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

the class TestWALObserver method getCoprocessor.

private SampleRegionWALObserver getCoprocessor(WAL wal, Class<? extends SampleRegionWALObserver> clazz) throws Exception {
    WALCoprocessorHost host = wal.getCoprocessorHost();
    Coprocessor c = host.findCoprocessor(clazz.getName());
    return (SampleRegionWALObserver) c;
}
Also used : WALCoprocessorHost(org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost) Coprocessor(org.apache.hadoop.hbase.Coprocessor)
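
The helper only narrows the Coprocessor returned by WALCoprocessorHost.findCoprocessor(...) to the concrete observer type so a test can inspect its state. A hypothetical call site follows; the isPreWALWriteCalled() accessor is an assumed name used purely for illustration:

// Hypothetical usage: fetch the loaded WAL observer and inspect its state.
// isPreWALWriteCalled() is an assumed accessor, not part of the example above.
SampleRegionWALObserver observer = getCoprocessor(wal, SampleRegionWALObserver.class);
assertNotNull(observer);
assertTrue(observer.isPreWALWriteCalled());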

Example 14 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

the class AbstractTestFSWAL method testWALCoprocessorLoaded.

/**
   * A loaded WAL coprocessor won't break existing WAL test cases.
   */
@Test
public void testWALCoprocessorLoaded() throws Exception {
    // Verify that the sample WAL coprocessor was loaded for the newly created WAL.
    AbstractFSWAL<?> wal = null;
    try {
        wal = newWAL(FS, FSUtils.getWALRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null);
        WALCoprocessorHost host = wal.getCoprocessorHost();
        Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
        assertNotNull(c);
    } finally {
        if (wal != null) {
            wal.close();
        }
    }
}
Also used : SampleRegionWALObserver(org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver) Coprocessor(org.apache.hadoop.hbase.Coprocessor) Test(org.junit.Test)
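
The assertion can only pass if SampleRegionWALObserver was registered on CONF before the WAL was created. A minimal sketch of that setup, assuming it is done in the test's static initialization; hbase.coprocessor.wal.classes is the standard property under which WALCoprocessorHost looks up WAL coprocessors:

// Sketch only: register the sample observer so that every WAL created with
// this configuration loads it through its WALCoprocessorHost.
CONF.set("hbase.coprocessor.wal.classes",
    SampleRegionWALObserver.class.getName());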

Example 15 with Coprocessor

use of org.apache.hadoop.hbase.Coprocessor in project hbase by apache.

the class TestNamespaceAuditor method testRegionMerge.

@Test
public void testRegionMerge() throws Exception {
    String nsp1 = prefix + "_regiontest";
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
    ADMIN.createNamespace(nspDesc);
    final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
    byte[] columnFamily = Bytes.toBytes("info");
    HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
    tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
    final int initialRegions = 3;
    ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
    Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    try (Table table = connection.getTable(tableTwo)) {
        UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
    }
    ADMIN.flush(tableTwo);
    List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
    Collections.sort(hris);
    // merge the two regions
    final Set<String> encodedRegionNamesToMerge = Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
    ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
    UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                    return false;
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public String explainFailure() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                    return hri + " which is expected to be merged is still online";
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return hri + " is still in not opened";
                }
            }
            return "Unknown";
        }
    });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions - 1, hris.size());
    Collections.sort(hris);
    final HRegionInfo hriToSplit = hris.get(1);
    ADMIN.split(tableTwo, Bytes.toBytes("500"));
    UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                    return false;
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return false;
                }
            }
            return true;
        }

        @Override
        public String explainFailure() throws Exception {
            RegionStates regionStates = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
                if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                    return hriToSplit + " which is expected to be split is still online";
                }
                if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                    return hri + " is still in not opened";
                }
            }
            return "Unknown";
        }
    });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // fail region merge through Coprocessor hook
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class.getName());
    CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
    masterObserver.failMerge(true);
    masterObserver.triggered = false;
    ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
    masterObserver.waitUtilTriggered();
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // verify that we cannot split
    HRegionInfo hriToSplit2 = hris.get(1);
    ADMIN.split(tableTwo, TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true));
    Thread.sleep(2000);
    assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) RestoreSnapshotException(org.apache.hadoop.hbase.snapshot.RestoreSnapshotException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) QuotaExceededException(org.apache.hadoop.hbase.quotas.QuotaExceededException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) RegionStates(org.apache.hadoop.hbase.master.RegionStates) Coprocessor(org.apache.hadoop.hbase.Coprocessor) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)
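
The merge-failure step relies on CPMasterObserver having been installed on the master before the mini cluster started; otherwise cpHost.findCoprocessor(...) returns null. A minimal sketch of that registration, assuming it happens in the test's setup code; hbase.coprocessor.master.classes is the standard property for master coprocessors:

// Sketch only: install the master observer before starting the mini cluster
// so that MasterCoprocessorHost.findCoprocessor(...) can locate it later.
Configuration conf = UTIL.getConfiguration();
conf.set("hbase.coprocessor.master.classes",
    CPMasterObserver.class.getName());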

Aggregations

Coprocessor (org.apache.hadoop.hbase.Coprocessor): 16
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 7
Test (org.junit.Test): 7
TableName (org.apache.hadoop.hbase.TableName): 5
IOException (java.io.IOException): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
Region (org.apache.hadoop.hbase.regionserver.Region): 4
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 3
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 3
RegionCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost): 3
Method (java.lang.reflect.Method): 2
CoprocessorEnvironment (org.apache.hadoop.hbase.CoprocessorEnvironment): 2
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 2
Put (org.apache.hadoop.hbase.client.Put): 2
Scan (org.apache.hadoop.hbase.client.Scan): 2
Table (org.apache.hadoop.hbase.client.Table): 2
SampleRegionWALObserver (org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver): 2
WALCoprocessorHost (org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost): 2