use of io.netty.channel.ChannelFuture in project flink by apache.
the class ClientTransportErrorHandlingTest method testExceptionOnWrite.
/**
* Verifies that failed client requests via {@link PartitionRequestClient} are correctly
* attributed to the respective {@link RemoteInputChannel}.
*/
@Test
public void testExceptionOnWrite() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(
                    mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class),
                    mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };
    // We need a real server and client in this test, because Netty's EmbeddedChannel
    // does not fail the ChannelPromise of failed writes.
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());
    Channel ch = connect(serverAndClient);
    PartitionRequestClientHandler handler = getClientHandler(ch);
    // Outbound events flow from tail to head, so the handler added via addFirst() is the
    // last outbound handler to see a write. It throws an exception on every write after
    // the first one.
    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {

        int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (writeNum >= 1) {
                throw new RuntimeException("Expected test exception.");
            }
            writeNum++;
            ctx.write(msg, promise);
        }
    });
    PartitionRequestClient requestClient = new PartitionRequestClient(
            ch, handler, mock(ConnectionID.class), mock(PartitionRequestClientFactory.class));
    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(), createRemoteInputChannel() };
    final CountDownLatch sync = new CountDownLatch(1);
    // Do this with explicit synchronization. Otherwise this is not robust against slow timings
    // of the callback (e.g. we cannot just verify that it was called once, because there is
    // a chance that we do this too early).
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(rich[1]).onError(isA(LocalTransportException.class));
    // First request is successful
    ChannelFuture f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[0], 0);
    assertTrue(f.await().isSuccess());
    // Second request is *not* successful
    f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[1], 0);
    assertFalse(f.await().isSuccess());
    // Only the second channel should be notified about the error
    verify(rich[0], times(0)).onError(any(LocalTransportException.class));
    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about the channel error.");
    }
    shutdown(serverAndClient);
}
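A note on the addFirst() trick above: outbound operations in a Netty pipeline travel from the tail to the head, so a handler registered with addFirst() sits at the head and is the last outbound handler to see a write. The following self-contained sketch illustrates the ordering; the class and handler names are ours and not part of the Flink test:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import io.netty.channel.embedded.EmbeddedChannel;

public class OutboundOrderDemo {

    public static void main(String[] args) {
        EmbeddedChannel ch = new EmbeddedChannel();
        // Outbound events flow tail -> head, so the handler at the head runs last.
        ch.pipeline().addFirst("head", new ChannelOutboundHandlerAdapter() {

            @Override
            public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                System.out.println("head handler (second)");
                ctx.write(msg, promise);
            }
        });
        ch.pipeline().addLast("tail", new ChannelOutboundHandlerAdapter() {

            @Override
            public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                System.out.println("tail handler (first)");
                ctx.write(msg, promise);
            }
        });
        // Prints "tail handler (first)", then "head handler (second)".
        ch.writeAndFlush("message");
        ch.finishAndReleaseAll();
    }
}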
use of io.netty.channel.ChannelFuture in project flink by apache.
the class HttpTestClient method sendRequest.
/**
* Sends a request to the server.
*
* <pre>
* HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/overview");
* request.headers().set(HttpHeaders.Names.HOST, host);
* request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
*
* sendRequest(request, timeout);
* </pre>
*
* @param request The {@link HttpRequest} to send to the server
* @param timeout Timeout for the connection attempt
*/
public void sendRequest(HttpRequest request, FiniteDuration timeout) throws InterruptedException, TimeoutException {
    LOG.debug("Writing {}.", request);
    // Make the connection attempt.
    ChannelFuture connect = bootstrap.connect(host, port);
    Channel channel;
    if (connect.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
        // Note: await() only waits for completion; a connect that completed with a
        // failure still falls through here, and the subsequent write will then fail.
        channel = connect.channel();
    } else {
        throw new TimeoutException("Connection failed");
    }
    // Fire-and-forget: the future returned by writeAndFlush() is not awaited.
    channel.writeAndFlush(request);
}
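sendRequest() above is fire-and-forget: only the connect is bounded by the timeout, while the write future from writeAndFlush() is dropped. A caller that also needs to know whether the request reached the wire could await the write future as well. A hypothetical helper sketching this (the class name and error handling are ours, not part of HttpTestClient):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.handler.codec.http.HttpRequest;

final class AwaitedSend {

    /** Sends the request and surfaces write failures instead of dropping them. */
    static void sendAndAwait(Channel channel, HttpRequest request, long timeoutMs)
            throws InterruptedException, TimeoutException {
        ChannelFuture write = channel.writeAndFlush(request);
        if (!write.await(timeoutMs, TimeUnit.MILLISECONDS)) {
            throw new TimeoutException("Write timed out after " + timeoutMs + " ms");
        }
        if (!write.isSuccess()) {
            throw new RuntimeException("Write failed", write.cause());
        }
    }
}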
use of io.netty.channel.ChannelFuture in project flink by apache.
the class PartitionRequestClient method requestSubpartition.
/**
* Requests a remote intermediate result partition queue.
* <p>
* The request goes to the remote producer, for which this partition
* request client instance has been created.
*/
public ChannelFuture requestSubpartition(final ResultPartitionID partitionId, final int subpartitionIndex, final RemoteInputChannel inputChannel, int delayMs) throws IOException {
    checkNotClosed();
    LOG.debug("Requesting subpartition {} of partition {} with {} ms delay.", subpartitionIndex, partitionId, delayMs);
    partitionRequestHandler.addInputChannel(inputChannel);
    final PartitionRequest request = new PartitionRequest(partitionId, subpartitionIndex, inputChannel.getInputChannelId());
    // On a failed write, deregister the input channel and surface the failure as a
    // LocalTransportException via the channel's error callback.
    final ChannelFutureListener listener = new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                partitionRequestHandler.removeInputChannel(inputChannel);
                inputChannel.onError(new LocalTransportException(
                        "Sending the partition request failed.",
                        future.channel().localAddress(), future.cause()));
            }
        }
    };
    if (delayMs == 0) {
        ChannelFuture f = tcpChannel.writeAndFlush(request);
        f.addListener(listener);
        return f;
    } else {
        final ChannelFuture[] f = new ChannelFuture[1];
        tcpChannel.eventLoop().schedule(new Runnable() {

            @Override
            public void run() {
                f[0] = tcpChannel.writeAndFlush(request);
                f[0].addListener(listener);
            }
        }, delayMs, TimeUnit.MILLISECONDS);
        // Note: the scheduled task has typically not run yet at this point, so f[0]
        // is still null; callers of the delayed path cannot rely on the returned future.
        return f[0];
    }
}
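The pattern used here — attach a ChannelFutureListener to the future returned by writeAndFlush() and translate transport failures into an application-level callback — is reusable beyond Flink. A minimal sketch with hypothetical names (FailureAwareSender and ErrorCallback are ours, not Flink API):

import java.net.SocketAddress;

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

final class FailureAwareSender {

    interface ErrorCallback {
        void onError(SocketAddress localAddress, Throwable cause);
    }

    /** Writes msg and routes any transport failure to the given callback. */
    static ChannelFuture send(Channel channel, Object msg, final ErrorCallback callback) {
        ChannelFuture f = channel.writeAndFlush(msg);
        f.addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) {
                if (!future.isSuccess()) {
                    callback.onError(future.channel().localAddress(), future.cause());
                }
            }
        });
        return f;
    }
}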
use of io.netty.channel.ChannelFuture in project flink by apache.
the class PartitionRequestClient method sendTaskEvent.
/**
* Sends a task event backwards to an intermediate result partition producer.
* <p>
* Backwards task events flow between readers and writers and therefore
* will only work when both are running at the same time, which is only
* guaranteed to be the case when both the respective producer and
* consumer task run pipelined.
*/
public void sendTaskEvent(ResultPartitionID partitionId, TaskEvent event, final RemoteInputChannel inputChannel) throws IOException {
    checkNotClosed();
    tcpChannel.writeAndFlush(new TaskEventRequest(event, partitionId, inputChannel.getInputChannelId()))
            .addListener(new ChannelFutureListener() {

                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        inputChannel.onError(new LocalTransportException(
                                "Sending the task event failed.",
                                future.channel().localAddress(), future.cause()));
                    }
                }
            });
}
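sendTaskEvent() relies on the same write-and-listen pattern as requestSubpartition() above. One caveat when reusing it: Netty notifies listeners on the channel's event loop, so a slow onError callback can stall I/O on that channel. A hedged usage sketch of the FailureAwareSender helper from the previous example, handing the callback off to a dedicated executor (all names are ours):

import java.net.SocketAddress;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import io.netty.channel.Channel;

final class OffloadedErrorHandling {

    // Dedicated thread for error handling, so callbacks never block Netty I/O.
    private static final ExecutorService CALLBACK_EXECUTOR = Executors.newSingleThreadExecutor();

    static void sendWithOffload(Channel channel, Object msg) {
        FailureAwareSender.send(channel, msg, new FailureAwareSender.ErrorCallback() {

            @Override
            public void onError(final SocketAddress localAddress, final Throwable cause) {
                CALLBACK_EXECUTOR.execute(new Runnable() {

                    @Override
                    public void run() {
                        // Potentially slow error handling runs off the event loop.
                        System.err.println("Send from " + localAddress + " failed: " + cause);
                    }
                });
            }
        });
    }
}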
use of io.netty.channel.ChannelFuture in project hbase by apache.
the class FanOutOneBlockAsyncDFSOutputHelper method connectToDataNodes.
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
            .setBaseHeader(BaseHeaderProto.newBuilder()
                    .setBlock(PB_HELPER.convert(blockCopy))
                    .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
            .setClientName(clientName)
            .build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
            .setHeader(header)
            .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
            .setPipelineSize(1)
            .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
            .setMaxBytesRcvd(maxBytesRcvd)
            .setLatestGenerationStamp(latestGS)
            .setRequestedChecksum(checksumProto)
            .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        Enum<?> storageType = storageTypes[i];
        Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class)
                .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
                .handler(new ChannelInitializer<Channel>() {

                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        // We need the remote address of the channel, so we can only move on
                        // after the channel is connected. Leave an empty implementation here,
                        // because Netty does not allow a null handler.
                    }
                })
                .connect(NetUtils.createSocketAddr(dnAddr))
                .addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (future.isSuccess()) {
                            // Hand the connected channel to the per-datanode setup, which is
                            // responsible for completing (or failing) the promise.
                            initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
    }
    return futureList;
}
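Each element of the returned list is a Netty Promise that the per-datanode setup is expected to complete or fail. How callers consume the list is outside this snippet; since Netty's io.netty.util.concurrent.Future extends java.util.concurrent.Future, one simple way to block for all connections is sketched below (the helper name is ours, not HBase API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;

import io.netty.channel.Channel;
import io.netty.util.concurrent.Future;

final class ConnectAll {

    /** Blocks until every connect future completes, failing on the first error seen in order. */
    static List<Channel> awaitAll(List<Future<Channel>> futures)
            throws InterruptedException, ExecutionException {
        List<Channel> channels = new ArrayList<>(futures.size());
        for (Future<Channel> f : futures) {
            // get() rethrows the cause of a failed connect as an ExecutionException.
            channels.add(f.get());
        }
        return channels;
    }
}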