Example usage of org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface from the Apache HBase project: class AbstractTestIPC, method testRemoteError.
/**
 * Verifies that a server-side error raised by the {@code error} RPC is surfaced to the
 * blocking client as a {@link ServiceException} wrapping a {@link DoNotRetryIOException}.
 */
@Test
public void testRemoteError() throws IOException, ServiceException {
  RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer",
    Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
    new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1));
  try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
    rpcServer.start();
    BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
    stub.error(null, EmptyRequestProto.getDefaultInstance());
    // The error() call must propagate the remote failure; previously the test passed
    // silently if no exception was thrown. Reaching this line is a test failure.
    throw new AssertionError("Expected a ServiceException from the error RPC");
  } catch (ServiceException e) {
    LOG.info("Caught expected exception: " + e);
    // Unwrap the remote exception and check it is the non-retriable server error.
    IOException ioe = ProtobufUtil.handleRemoteException(e);
    assertTrue(ioe instanceof DoNotRetryIOException);
    assertTrue(ioe.getMessage().contains("server error!"));
  } finally {
    rpcServer.stop();
  }
}
Example usage of org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface from the Apache HBase project: class AbstractTestIPC, method testTimeout.
/**
 * Verifies that a call whose server-side pause exceeds the controller's call timeout
 * fails with a {@link CallTimeoutException} before the pause would have completed.
 */
@Test
public void testTimeout() throws IOException {
  RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer",
    Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
    new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1));
  try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
    rpcServer.start();
    BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
    HBaseRpcController pcrc = new HBaseRpcControllerImpl();
    int ms = 1000;      // server-side pause, much longer than the timeout below
    int timeout = 100;  // client call timeout in milliseconds
    for (int i = 0; i < 10; i++) {
      pcrc.reset();
      pcrc.setCallTimeout(timeout);
      long startTime = System.nanoTime();
      try {
        stub.pause(pcrc, PauseRequestProto.newBuilder().setMs(ms).build());
        // Previously the iteration passed silently if the call did not time out;
        // make the missing timeout an explicit failure.
        throw new AssertionError("Expected the call to time out after " + timeout + " ms");
      } catch (ServiceException e) {
        long waitTime = (System.nanoTime() - startTime) / 1000000;
        // expected
        LOG.info("Caught expected exception: " + e);
        IOException ioe = ProtobufUtil.handleRemoteException(e);
        assertTrue(ioe.getCause() instanceof CallTimeoutException);
        // confirm that we got exception before the actual pause.
        assertTrue(waitTime < ms);
      }
    }
  } finally {
    rpcServer.stop();
  }
}
Example usage of org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface from the Apache HBase project: class AbstractTestIPC, method testRpcScheduler.
/**
 * Tests that the rpc scheduler is called when requests arrive.
 */
@Test
public void testRpcScheduler() throws IOException, ServiceException, InterruptedException {
  RpcScheduler scheduler = spy(new FifoRpcScheduler(CONF, 1));
  RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer",
    Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
    new InetSocketAddress("localhost", 0), CONF, scheduler);
  // Constructing the server must hand the scheduler its context.
  verify(scheduler).init((RpcScheduler.Context) anyObject());
  try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
    rpcServer.start();
    // Starting the server must start the scheduler as well.
    verify(scheduler).start();
    BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
    EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build();
    int calls = 10;
    for (int sent = 0; sent < calls; sent++) {
      stub.echo(null, request);
    }
    // Every request is expected to have been routed through the scheduler.
    verify(scheduler, times(calls)).dispatch((CallRunner) anyObject());
  } finally {
    rpcServer.stop();
    verify(scheduler).stop();
  }
}
Example usage of org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface from the Apache HBase project: class AbstractTestIPC, method testCompressCellBlock.
/**
 * It is hard to verify the compression is actually happening under the wraps. Hope that if
 * unsupported, we'll get an exception out of some time (meantime, have to trace it manually to
 * confirm that compression is happening down in the client and server).
 */
@Test
public void testCompressCellBlock() throws IOException, ServiceException {
  // Client configuration with a gzip cell-block compressor enabled.
  Configuration conf = new Configuration(HBaseConfiguration.create());
  conf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName());
  int count = 3;
  List<Cell> cells = new ArrayList<>(count);
  for (int added = 0; added < count; added++) {
    cells.add(CELL);
  }
  RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer",
    Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
    new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1));
  try (AbstractRpcClient<?> client = createRpcClient(conf)) {
    rpcServer.start();
    BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
    HBaseRpcController controller = new HBaseRpcControllerImpl(CellUtil.createCellScanner(cells));
    String message = "hello";
    String echoed =
      stub.echo(controller, EchoRequestProto.newBuilder().setMessage(message).build()).getMessage();
    assertEquals(message, echoed);
    // The cells we attached must come back unchanged, one per slot we sent.
    CellScanner returned = controller.cellScanner();
    assertNotNull(returned);
    int seen = 0;
    for (; returned.advance(); seen++) {
      assertEquals(CELL, returned.current());
    }
    assertEquals(count, seen);
  } finally {
    rpcServer.stop();
  }
}
Example usage of org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface from the Apache HBase project: class AbstractTestIPC, method testAsyncRemoteError.
/**
 * Verifies that a server-side error raised by the {@code error} RPC is surfaced to the
 * asynchronous client through the controller's failed state as a
 * {@link DoNotRetryIOException}.
 */
@Test
public void testAsyncRemoteError() throws IOException {
  RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer",
    Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
    new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1));
  // Manage the client with try-with-resources, consistent with the other tests in this
  // class, instead of an explicit close() in the finally block.
  try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
    rpcServer.start();
    Interface stub = newStub(client, rpcServer.getListenerAddress());
    BlockingRpcCallback<EmptyResponseProto> callback = new BlockingRpcCallback<>();
    HBaseRpcController pcrc = new HBaseRpcControllerImpl();
    stub.error(pcrc, EmptyRequestProto.getDefaultInstance(), callback);
    // No response payload on failure; the error is recorded on the controller.
    assertNull(callback.get());
    assertTrue(pcrc.failed());
    LOG.info("Caught expected exception: " + pcrc.getFailed());
    IOException ioe = ProtobufUtil.handleRemoteException(pcrc.getFailed());
    assertTrue(ioe instanceof DoNotRetryIOException);
    assertTrue(ioe.getMessage().contains("server error!"));
  } finally {
    rpcServer.stop();
  }
}
Aggregations