Search in sources:

Example 1 with RpcServerInterface

Usage of org.apache.hadoop.hbase.ipc.RpcServerInterface in the Apache HBase project.

From class TokenProvider, method start().

@Override
public void start(CoprocessorEnvironment env) {
    // Only a region-hosted coprocessor has access to the region server's
    // RPC server; in any other context there is nothing to wire up.
    if (!(env instanceof RegionCoprocessorEnvironment)) {
        return;
    }
    RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env;
    RpcServerInterface rpcServer = regionEnv.getRegionServerServices().getRpcServer();
    // NOTE(review): assumes the RpcServerInterface instance is an RpcServer —
    // same unchecked cast as before; a different implementation would throw CCE.
    SecretManager<?> candidate = ((RpcServer) rpcServer).getSecretManager();
    // Keep the secret manager only when it is the token-authentication one.
    if (candidate instanceof AuthenticationTokenSecretManager) {
        secretManager = (AuthenticationTokenSecretManager) candidate;
    }
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) RpcServer(org.apache.hadoop.hbase.ipc.RpcServer) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface)

Example 2 with RpcServerInterface

Usage of org.apache.hadoop.hbase.ipc.RpcServerInterface in the Apache HBase project.

From class TestMultiRespectsLimits, method testMultiLimits().

/**
 * Verifies that a large multi-get is chunked across responses and that the
 * server's "exceptions" and "exceptions.multiResponseTooLarge" metric
 * counters advance accordingly.
 */
@Test
public void testMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Table t = TEST_UTIL.createTable(tableName, FAMILY);
    TEST_UTIL.loadTable(t, FAMILY, false);
    // Split the table to make sure that the chunking happens across regions.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
        admin.split(tableName);
        TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return admin.getTableRegions(tableName).size() > 1;
            }
        });
    }
    List<Get> gets = new ArrayList<>(MAX_SIZE);
    for (int i = 0; i < MAX_SIZE; i++) {
        gets.add(new Get(HBaseTestingUtility.ROWS[i]));
    }
    RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    // Snapshot counters so the assertions below can check the delta only.
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);
    Result[] results = t.get(gets);
    assertEquals(MAX_SIZE, results.length);
    // Cells from TEST_UTIL.loadTable have a length of 27.
    // Multiplying by less than that gives an easy lower bound on size.
    // However in reality each kv is being reported as much higher than that.
    // Fix: the original expression ((MAX_SIZE * 25) / MAX_SIZE) is just 25,
    // but written that way it can overflow int for large MAX_SIZE values.
    final long expectedMinNewExceptions = 25;
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + expectedMinNewExceptions, s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", startingMultiExceptions + expectedMinNewExceptions, s);
}
Also used : BaseSource(org.apache.hadoop.hbase.metrics.BaseSource) ArrayList(java.util.ArrayList) TableName(org.apache.hadoop.hbase.TableName) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)

Example 3 with RpcServerInterface

Usage of org.apache.hadoop.hbase.ipc.RpcServerInterface in the Apache HBase project.

From class TestSecureIPC, method callRpcService().

/**
 * Sets up a RPC Server and a Client. Does a RPC and checks the result. If an exception is thrown
 * from the stub, this function will throw the root cause of that exception.
 *
 * @param clientUser the user identity the client connects with
 * @throws Exception the root cause of any failure observed on the test thread
 */
private void callRpcService(User clientUser) throws Exception {
    SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class);
    Mockito.when(securityInfoMock.getServerPrincipal()).thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL);
    SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock);
    // Port 0: let the OS pick a free port for the test server.
    InetSocketAddress isa = new InetSocketAddress(HOST, 0);
    RpcServerInterface rpcServer = RpcServerFactory.createRpcServer(null, "AbstractTestSecureIPC", Lists.newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), isa, serverConf, new FifoRpcScheduler(serverConf, 1));
    rpcServer.start();
    try (RpcClient rpcClient = RpcClientFactory.createClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString())) {
        BlockingInterface stub = newBlockingStub(rpcClient, rpcServer.getListenerAddress(), clientUser);
        TestThread th1 = new TestThread(stub);
        // Capture any failure from the worker thread so it can be rethrown here.
        // (Removed a leftover Collections.synchronizedList(...) call whose
        // result was discarded — it was dead code.)
        final Throwable[] exception = new Throwable[1];
        Thread.UncaughtExceptionHandler exceptionHandler = new Thread.UncaughtExceptionHandler() {

            @Override
            public void uncaughtException(Thread th, Throwable ex) {
                exception[0] = ex;
            }
        };
        th1.setUncaughtExceptionHandler(exceptionHandler);
        th1.start();
        th1.join();
        if (exception[0] != null) {
            // throw root cause.
            while (exception[0].getCause() != null) {
                exception[0] = exception[0].getCause();
            }
            throw (Exception) exception[0];
        }
    } finally {
        rpcServer.stop();
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) FifoRpcScheduler(org.apache.hadoop.hbase.ipc.FifoRpcScheduler) SaslException(javax.security.sasl.SaslException) ExpectedException(org.junit.rules.ExpectedException) IOException(java.io.IOException) BlockingInterface(org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface) BlockingRpcClient(org.apache.hadoop.hbase.ipc.BlockingRpcClient) RpcClient(org.apache.hadoop.hbase.ipc.RpcClient) NettyRpcClient(org.apache.hadoop.hbase.ipc.NettyRpcClient)

Example 4 with RpcServerInterface

Usage of org.apache.hadoop.hbase.ipc.RpcServerInterface in the Apache HBase project.

From class TestMultiRespectsLimits, method testBlockMultiLimits().

/**
 * Verifies that a multi-get whose individual results fit under the size limit,
 * but whose referenced data blocks are larger than MAX_SIZE, still returns all
 * results and advances the "multiResponseTooLarge" exception counters.
 */
@Test
public void testBlockMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    // NOTE(review): FAST_DIFF block encoding is presumably what keeps the
    // row's cells together in large shared blocks — confirm against the test's
    // size assumptions below.
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    desc.addFamily(hcd);
    TEST_UTIL.getAdmin().createTable(desc);
    Table t = TEST_UTIL.getConnection().getTable(tableName);
    final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
    RpcServerInterface rpcServer = regionServer.getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    // Snapshot counters so the assertions at the end can check deltas only.
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);
    byte[] row = Bytes.toBytes("TEST");
    byte[][] cols = new byte[][] { // Get this
    Bytes.toBytes("0"), // Buffer
    Bytes.toBytes("1"), // Buffer
    Bytes.toBytes("2"), // Get This
    Bytes.toBytes("3"), // Buffer
    Bytes.toBytes("4"), // Buffer
    Bytes.toBytes("5") };
    // Set the value size so that one result will be less than the MAX_SIZE
    // however the block being referenced will be larger than MAX_SIZE.
    // This should cause the regionserver to try and send a result immediately.
    byte[] value = new byte[MAX_SIZE - 100];
    ThreadLocalRandom.current().nextBytes(value);
    for (byte[] col : cols) {
        Put p = new Put(row);
        p.addImmutable(FAMILY, col, value);
        t.put(p);
    }
    // Make sure that a flush happens
    // NOTE(review): waiting for maxFlushedSeqId > 3 presumably means the puts
    // above have been persisted — confirm the seqId threshold is right.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
        admin.flush(tableName);
        TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return regionServer.getOnlineRegions(tableName).get(0).getMaxFlushedSeqId() > 3;
            }
        });
    }
    // Fetch only the two columns marked "Get this" above (cols[0] and cols[3]).
    List<Get> gets = new ArrayList<>(2);
    Get g0 = new Get(row);
    g0.addColumn(FAMILY, cols[0]);
    gets.add(g0);
    Get g2 = new Get(row);
    g2.addColumn(FAMILY, cols[3]);
    gets.add(g2);
    Result[] results = t.get(gets);
    assertEquals(2, results.length);
    // Both counters must have advanced past the values snapshotted earlier.
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", startingMultiExceptions, s);
}
Also used : BaseSource(org.apache.hadoop.hbase.metrics.BaseSource) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) TableName(org.apache.hadoop.hbase.TableName) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface) Waiter(org.apache.hadoop.hbase.Waiter) Test(org.junit.Test)

Aggregations

RpcServerInterface (org.apache.hadoop.hbase.ipc.RpcServerInterface)4 ArrayList (java.util.ArrayList)2 TableName (org.apache.hadoop.hbase.TableName)2 Waiter (org.apache.hadoop.hbase.Waiter)2 BaseSource (org.apache.hadoop.hbase.metrics.BaseSource)2 Test (org.junit.Test)2 IOException (java.io.IOException)1 InetSocketAddress (java.net.InetSocketAddress)1 SaslException (javax.security.sasl.SaslException)1 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)1 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)1 RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment)1 BlockingRpcClient (org.apache.hadoop.hbase.ipc.BlockingRpcClient)1 FifoRpcScheduler (org.apache.hadoop.hbase.ipc.FifoRpcScheduler)1 NettyRpcClient (org.apache.hadoop.hbase.ipc.NettyRpcClient)1 RpcClient (org.apache.hadoop.hbase.ipc.RpcClient)1 RpcServer (org.apache.hadoop.hbase.ipc.RpcServer)1 HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)1 BlockingInterface (org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface)1 ExpectedException (org.junit.rules.ExpectedException)1