Use of io.netty.channel.ChannelHandlerContext in project failsafe by jhalterman.
The class NettyExample, method createBootstrap.
static Bootstrap createBootstrap(EventLoopGroup group) {
  return new Bootstrap().group(group).channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
    @Override
    public void initChannel(SocketChannel ch) throws Exception {
      ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) {
          // Queue the message for writing; it is flushed in channelReadComplete.
          ctx.write(msg);
        }

        @Override
        public void channelReadComplete(ChannelHandlerContext ctx) {
          ctx.flush();
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
          // Close the connection when an exception is raised.
          cause.printStackTrace();
          ctx.close();
        }
      });
    }
  });
}
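For context, a Bootstrap built this way could be used to open a connection and write to it. The sketch below is illustrative only; the host, port, and payload are assumptions, not part of the Failsafe example:

EventLoopGroup group = new NioEventLoopGroup();
try {
  // Connect using the factory above (host and port are assumptions).
  Channel channel = createBootstrap(group).connect("localhost", 8080).sync().channel();
  channel.writeAndFlush(Unpooled.copiedBuffer("ping", CharsetUtil.UTF_8));
  channel.closeFuture().sync();
} finally {
  group.shutdownGracefully();
}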
Use of io.netty.channel.ChannelHandlerContext in project pinot by linkedin.
The class ScheduledRequestHandlerTest, method setupTestMethod.
@BeforeTest
public void setupTestMethod() {
  serverMetrics = new ServerMetrics(new MetricsRegistry());
  channelHandlerContext = mock(ChannelHandlerContext.class, RETURNS_DEEP_STUBS);
  when(channelHandlerContext.channel().remoteAddress()).thenAnswer(new Answer<InetSocketAddress>() {
    @Override
    public InetSocketAddress answer(InvocationOnMock invocationOnMock) throws Throwable {
      return new InetSocketAddress("localhost", 60000);
    }
  });
  queryScheduler = mock(QueryScheduler.class);
  queryExecutor = new ServerQueryExecutorV1Impl();
}
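Because the mock is created with RETURNS_DEEP_STUBS, the chained call channelHandlerContext.channel().remoteAddress() can be stubbed in one step without mocking the intermediate Channel. A minimal sketch of a test exercising that stub; the method name and TestNG assertions are assumptions, not part of the Pinot sources:

@Test
public void remoteAddressIsStubbed() {
  // The deep-stubbed mock answers the full channel().remoteAddress() chain.
  InetSocketAddress address = (InetSocketAddress) channelHandlerContext.channel().remoteAddress();
  Assert.assertEquals(address.getHostName(), "localhost");
  Assert.assertEquals(address.getPort(), 60000);
}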
Use of io.netty.channel.ChannelHandlerContext in project hive by apache.
The class RpcDispatcher, method handleCall.
private void handleCall(ChannelHandlerContext ctx, Object msg) throws Exception {
  Method handler = handlers.get(msg.getClass());
  if (handler == null) {
    // Look up a "handle(ctx, <message type>)" method reflectively and cache it.
    handler = getClass().getDeclaredMethod("handle", ChannelHandlerContext.class, msg.getClass());
    handler.setAccessible(true);
    handlers.put(msg.getClass(), handler);
  }
  Rpc.MessageType replyType;
  Object replyPayload;
  try {
    replyPayload = handler.invoke(this, ctx, msg);
    if (replyPayload == null) {
      replyPayload = new Rpc.NullMessage();
    }
    replyType = Rpc.MessageType.REPLY;
  } catch (InvocationTargetException ite) {
    LOG.debug(String.format("[%s] Error in RPC handler.", name()), ite.getCause());
    // On failure, reply with the stack trace as an ERROR payload.
    replyPayload = Throwables.getStackTraceAsString(ite.getCause());
    replyType = Rpc.MessageType.ERROR;
  }
  ctx.channel().write(new Rpc.MessageHeader(lastHeader.id, replyType));
  ctx.channel().writeAndFlush(replyPayload);
}
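The reflective lookup above expects the concrete dispatcher to declare a handle method for each message type it supports. A minimal sketch of such a method, where EchoMessage is a hypothetical message class used only for illustration:

// Discovered via getDeclaredMethod("handle", ChannelHandlerContext.class, EchoMessage.class).
private String handle(ChannelHandlerContext ctx, EchoMessage msg) {
  // A non-null return value is sent back as the REPLY payload;
  // returning null would be replaced with Rpc.NullMessage.
  return msg.toString();
}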
Use of io.netty.channel.ChannelHandlerContext in project flink by apache.
The class KvStateClientTest, method testServerClosesChannel.
/**
 * Tests that a server channel close closes the connection and removes it
 * from the set of established connections.
 */
@Test
public void testServerClosesChannel() throws Exception {
  Deadline deadline = TEST_TIMEOUT.fromNow();
  AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
  KvStateClient client = null;
  Channel serverChannel = null;
  try {
    client = new KvStateClient(1, stats);
    final AtomicBoolean received = new AtomicBoolean();
    final AtomicReference<Channel> channel = new AtomicReference<>();
    serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
      @Override
      public void channelActive(ChannelHandlerContext ctx) throws Exception {
        channel.set(ctx.channel());
      }

      @Override
      public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        received.set(true);
      }
    });
    KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
    // Requests
    Future<byte[]> future = client.getKvState(serverAddress, new KvStateID(), new byte[0]);
    while (!received.get() && deadline.hasTimeLeft()) {
      Thread.sleep(50);
    }
    assertTrue("Receive timed out", received.get());
    assertEquals(1, stats.getNumConnections());
    channel.get().close().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    try {
      Await.result(future, deadline.timeLeft());
      fail("Did not throw expected server failure");
    } catch (ClosedChannelException ignored) {
      // Expected
    }
    assertEquals(0, stats.getNumConnections());
    // Counts can take some time to propagate
    while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 1)) {
      Thread.sleep(100);
    }
    assertEquals(1, stats.getNumRequests());
    assertEquals(0, stats.getNumSuccessful());
    assertEquals(1, stats.getNumFailed());
  } finally {
    if (client != null) {
      client.shutDown();
    }
    if (serverChannel != null) {
      serverChannel.close();
    }
    assertEquals("Channel leak", 0, stats.getNumConnections());
  }
}
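The test depends on a createServerChannel helper that is not shown here. A minimal sketch of what such a helper might look like, assuming a ServerBootstrap bound to an ephemeral local port; the details are assumptions, not the actual Flink test code:

private Channel createServerChannel(final ChannelHandler handler) throws Exception {
  // Bind a NIO server to an ephemeral port and install the given handler on each child.
  return new ServerBootstrap()
      .group(new NioEventLoopGroup())
      .channel(NioServerSocketChannel.class)
      .childHandler(handler)
      .bind("localhost", 0)
      .sync()
      .channel();
}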
Use of io.netty.channel.ChannelHandlerContext in project flink by apache.
The class KvStateServerTest, method testSimpleRequest.
/**
 * Tests a simple successful query via a SocketChannel.
 */
@Test
public void testSimpleRequest() throws Exception {
  KvStateServer server = null;
  Bootstrap bootstrap = null;
  try {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();
    server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
    server.start();
    KvStateServerAddress serverAddress = server.getAddress();
    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
        dummyEnv,
        new JobID(),
        "test_op",
        IntSerializer.INSTANCE,
        numKeyGroups,
        new KeyGroupRange(0, 0),
        registry.createTaskRegistry(new JobID(), new JobVertexID()));
    final KvStateServerHandlerTest.TestRegistryListener registryListener = new KvStateServerHandlerTest.TestRegistryListener();
    registry.registerListener(registryListener);
    ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    desc.setQueryable("vanilla");
    ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
    // Update KvState
    int expectedValue = 712828289;
    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(expectedValue);
    // Request
    byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
    // Connect to the server
    final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
    bootstrap = createBootstrap(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4), new ChannelInboundHandlerAdapter() {
      @Override
      public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        responses.add((ByteBuf) msg);
      }
    });
    Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort()).sync().channel();
    long requestId = Integer.MAX_VALUE + 182828L;
    assertTrue(registryListener.registrationName.equals("vanilla"));
    ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(channel.alloc(), requestId, registryListener.kvStateId, serializedKeyAndNamespace);
    channel.writeAndFlush(request);
    ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
    KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);
    assertEquals(requestId, response.getRequestId());
    int actualValue = KvStateRequestSerializer.deserializeValue(response.getSerializedResult(), IntSerializer.INSTANCE);
    assertEquals(expectedValue, actualValue);
  } finally {
    if (server != null) {
      server.shutDown();
    }
    if (bootstrap != null) {
      EventLoopGroup group = bootstrap.group();
      if (group != null) {
        group.shutdownGracefully();
      }
    }
  }
}
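Here, createBootstrap is a local helper that builds a client Bootstrap with the given pipeline handlers. A plausible sketch under that assumption; the actual Flink helper may differ:

private Bootstrap createBootstrap(final ChannelHandler... handlers) {
  return new Bootstrap()
      .group(new NioEventLoopGroup())
      .channel(NioSocketChannel.class)
      .handler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
          // Install the frame decoder first, then the response collector.
          ch.pipeline().addLast(handlers);
        }
      });
}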