Use of io.netty.util.concurrent.GenericFutureListener in project cdap by cdapio: class ServiceSocksServerConnectHandler, method createForwardingChannelHandler.
@Override
protected Future<RelayChannelHandler> createForwardingChannelHandler(Channel inboundChannel, String destAddress, int destPort) {
  Promise<RelayChannelHandler> promise = new DefaultPromise<>(inboundChannel.eventLoop());
  // Creates a bootstrap for connecting to the target service
  ChannelGroup channels = new DefaultChannelGroup(inboundChannel.eventLoop());
  Bootstrap bootstrap = new Bootstrap()
    .group(inboundChannel.eventLoop())
    .channel(NioSocketChannel.class)
    .option(ChannelOption.SO_KEEPALIVE, true)
    .handler(new ChannelInboundHandlerAdapter() {
      @Override
      public void channelActive(ChannelHandlerContext ctx) {
        channels.add(ctx.channel());
        // When the outbound connection is active, add the relay channel handler to the current pipeline,
        // which relays traffic coming back from the outbound connection.
        // Also complete the relay channel handler future, which relays traffic from inbound to outbound.
        ctx.pipeline().addLast(new SimpleRelayChannelHandler(inboundChannel));
        promise.setSuccess(new SimpleRelayChannelHandler(ctx.channel()));
      }
    });
  // Discover the target address
  Promise<Discoverable> discoverablePromise = new DefaultPromise<>(inboundChannel.eventLoop());
  Cancellable cancellable = discoveryServiceClient.discover(destAddress).watchChanges(serviceDiscovered -> {
    // If it is discovered, make a connection and complete the channel handler future
    Discoverable discoverable = new RandomEndpointStrategy(() -> serviceDiscovered).pick();
    if (discoverable != null) {
      discoverablePromise.setSuccess(discoverable);
    }
  }, inboundChannel.eventLoop());
  // When discovery completes successfully, connect to the destination
  discoverablePromise.addListener((GenericFutureListener<Future<Discoverable>>) discoverableFuture -> {
    cancellable.cancel();
    if (discoverableFuture.isSuccess()) {
      Discoverable discoverable = discoverableFuture.get();
      bootstrap.connect(discoverable.getSocketAddress()).addListener((ChannelFutureListener) channelFuture -> {
        if (!channelFuture.isSuccess()) {
          promise.setFailure(channelFuture.cause());
        }
      });
    } else {
      promise.setFailure(discoverableFuture.cause());
    }
  });
  // On inbound channel close, close all outbound channels.
  // Also cancel the watch since it is no longer needed.
  // This handles the case where discovery never returns an endpoint before the client connection times out.
  inboundChannel.closeFuture().addListener((ChannelFutureListener) future -> {
    cancellable.cancel();
    channels.close();
  });
  return promise;
}
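The SimpleRelayChannelHandler used above is not shown on this page. As a rough idea of what such a relay handler can look like, here is a minimal, hypothetical sketch (SimpleRelayChannelHandlerSketch is an illustrative name, not the actual cdap class): it forwards whatever it reads to the paired channel and propagates closes.

import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Hypothetical sketch of a relay handler: everything read on this channel is
// written to the paired relay channel, and a close on either side propagates.
public class SimpleRelayChannelHandlerSketch extends ChannelInboundHandlerAdapter {

  private final Channel relayChannel;

  public SimpleRelayChannelHandlerSketch(Channel relayChannel) {
    this.relayChannel = relayChannel;
  }

  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) {
    // Forward the message to the paired channel; flush so it leaves promptly.
    relayChannel.writeAndFlush(msg);
  }

  @Override
  public void channelInactive(ChannelHandlerContext ctx) {
    // If one side goes away, close the other side as well.
    if (relayChannel.isActive()) {
      relayChannel.close();
    }
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    ctx.close();
  }
}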
Use of io.netty.util.concurrent.GenericFutureListener in project riposte by Nike-Inc: class DTraceEndHandlerTest, method endDtrace_completes_the_trace_using_ChannelFutureListener_if_state_is_not_null_and_isResponseSendingLastChunkSent_returns_true.
@Test
public void endDtrace_completes_the_trace_using_ChannelFutureListener_if_state_is_not_null_and_isResponseSendingLastChunkSent_returns_true() throws Exception {
  // given
  assertThat(state.isTraceCompletedOrScheduled(), is(false));
  assertThat(state.isResponseSendingLastChunkSent(), is(true));
  assertThat(state.getDistributedTraceStack(), nullValue());
  Pair<Deque<Span>, Map<String, String>> expectedDtraceInfo = setupStateWithNewSpan("blahTrace");
  assertThat(state.getDistributedTraceStack(), notNullValue());
  assertThat(state.getDistributedTraceStack(), is(expectedDtraceInfo.getLeft()));
  assertThat(state.getDistributedTraceStack().size(), is(1));
  assertThat(state.isTracingResponseTaggingAndFinalSpanNameCompleted(), is(false));
  Span expectedSpan = expectedDtraceInfo.getLeft().peek();

  // when
  handlerSpy.endDtrace(ctxMock);

  // then
  // completeCurrentSpan() is not called immediately, but scheduled.
  verify(handlerSpy, never()).completeCurrentSpan();
  assertThat(state.isTraceCompletedOrScheduled(), is(true));
  // Response tagging was done.
  assertThat(state.isTracingResponseTaggingAndFinalSpanNameCompleted(), is(true));
  // Extract the listener that was attached to the last-chunk future.
  GenericFutureListener lastChunkListener = extractChannelFutureListenerAddedToLastChunkFuture();
  assertThat(lastChunkListener, notNullValue());
  assertThat(lastChunkListener, instanceOf(ChannelFutureListenerWithTracingAndMdc.class));
  assertThat(Whitebox.getInternalState(lastChunkListener, "distributedTraceStackForExecution"), is(expectedDtraceInfo.getLeft()));
  assertThat(Whitebox.getInternalState(lastChunkListener, "mdcContextMapForExecution"), is(expectedDtraceInfo.getRight()));
  Consumer<ChannelFuture> embeddedListenerConsumer =
    (Consumer<ChannelFuture>) Whitebox.getInternalState(lastChunkListener, "postCompleteOperation");
  // Execute the embedded listener so we can validate what it does. Note that we can't verify using a mockito spy verify(),
  // because the method call goes through the internal handler, not the spy impl. But we can still verify by
  // setting up the Tracer state to what we expect, executing the embedded listener, and verifying the subsequent Tracer state.
  AsyncNettyHelper.linkTracingAndMdcToCurrentThread(expectedDtraceInfo);
  assertThat(Tracer.getInstance().getCurrentSpan(), is(expectedSpan));
  embeddedListenerConsumer.accept(null);
  assertThat(Tracer.getInstance().getCurrentSpan(), nullValue());
}
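For context, the mechanism under test is the standard Netty pattern of deferring work until a write future completes. The following is a minimal, hypothetical sketch of that idea only (TraceCompletionListenerSketch and the completeCurrentSpan runnable are illustrative names, not riposte's API): the span is completed only after the last response chunk has actually been written.

import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

// Hypothetical sketch: defer span completion until the write of the response's
// last chunk has completed, so the span covers the full response send.
public final class TraceCompletionListenerSketch {

  public static void completeTraceWhenLastChunkSent(ChannelFuture lastChunkFuture, Runnable completeCurrentSpan) {
    lastChunkFuture.addListener((ChannelFutureListener) future -> {
      // Runs on the channel's event loop once the last-chunk write finishes,
      // whether it succeeded or failed.
      completeCurrentSpan.run();
    });
  }
}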
Use of io.netty.util.concurrent.GenericFutureListener in project ambry by linkedin: class MultiplexedChannelRecord, method acquireClaimedStream.
void acquireClaimedStream(Promise<Channel> promise) {
  NettyUtils.doInEventLoop(parentChannel.eventLoop(), () -> {
    if (state != RecordState.OPEN) {
      String message;
      // GOAWAY
      if (state == RecordState.CLOSED_TO_NEW) {
        message = String.format("Connection %s received GOAWAY with Last Stream ID %d. Unable to open new "
            + "streams on this connection.", parentChannel, lastStreamId);
      } else {
        message = String.format("Connection %s was closed while acquiring new stream.", parentChannel);
      }
      log.warn(message);
      promise.setFailure(new IOException(message));
      return;
    }
    Future<Http2StreamChannel> streamFuture =
        new Http2StreamChannelBootstrap(parentChannel).handler(streamChannelInitializer).open();
    streamFuture.addListener((GenericFutureListener<Future<Http2StreamChannel>>) future -> {
      NettyUtils.warnIfNotInEventLoop(parentChannel.eventLoop());
      if (!future.isSuccess()) {
        promise.setFailure(future.cause());
        return;
      }
      Http2StreamChannel channel = future.getNow();
      streamChannels.put(channel.id(), channel);
      promise.setSuccess(channel);
      if (closeIfIdleTask == null && allowedIdleTimeInMs != null && allowedIdleTimeInMs > 0) {
        enableCloseIfIdleTask();
      }
    });
  }, promise);
}
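The NettyUtils.doInEventLoop(...) helper used above is not shown on this page. A plausible minimal sketch, assuming its contract is "run the task on the event loop and fail the given promise if the task cannot run or throws" (NettyUtilsSketch is a hypothetical stand-in, not ambry's actual class):

import io.netty.channel.EventLoop;
import io.netty.util.concurrent.Promise;

// Hypothetical sketch of a doInEventLoop-style helper: execute the task on the
// event loop thread and propagate any failure to the supplied promise.
public final class NettyUtilsSketch {

  public static void doInEventLoop(EventLoop eventLoop, Runnable task, Promise<?> promiseToNotifyOnFailure) {
    try {
      if (eventLoop.inEventLoop()) {
        task.run();
      } else {
        eventLoop.execute(() -> {
          try {
            task.run();
          } catch (Throwable t) {
            promiseToNotifyOnFailure.tryFailure(t);
          }
        });
      }
    } catch (Throwable t) {
      promiseToNotifyOnFailure.tryFailure(t);
    }
  }
}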
Use of io.netty.util.concurrent.GenericFutureListener in project flink by apache: class TaskManagerLogHandler, method respondAsLeader.
/**
 * Responds when running with the leading JobManager.
 */
@Override
protected void respondAsLeader(final ChannelHandlerContext ctx, final Routed routed, final ActorGateway jobManager) {
  if (cache == null) {
    scala.concurrent.Future<Object> portFuture = jobManager.ask(JobManagerMessages.getRequestBlobManagerPort(), timeout);
    scala.concurrent.Future<BlobCache> cacheFuture = portFuture.map(new Mapper<Object, BlobCache>() {
      @Override
      public BlobCache checkedApply(Object result) throws IOException {
        Option<String> hostOption = jobManager.actor().path().address().host();
        String host = hostOption.isDefined() ? hostOption.get() : "localhost";
        int port = (int) result;
        return new BlobCache(new InetSocketAddress(host, port), config);
      }
    }, executor);
    cache = new FlinkFuture<>(cacheFuture);
  }
  final String taskManagerID = routed.pathParams().get(TaskManagersHandler.TASK_MANAGER_ID_KEY);
  final HttpRequest request = routed.request();
  // Fetch TaskManager logs if no other process is currently doing it.
  if (lastRequestPending.putIfAbsent(taskManagerID, true) == null) {
    try {
      InstanceID instanceID = new InstanceID(StringUtils.hexStringToByte(taskManagerID));
      scala.concurrent.Future<JobManagerMessages.TaskManagerInstance> scalaTaskManagerFuture = jobManager
        .ask(new JobManagerMessages.RequestTaskManagerInstance(instanceID), timeout)
        .mapTo(ClassTag$.MODULE$.<JobManagerMessages.TaskManagerInstance>apply(JobManagerMessages.TaskManagerInstance.class));
      Future<JobManagerMessages.TaskManagerInstance> taskManagerFuture = new FlinkFuture<>(scalaTaskManagerFuture);
      Future<BlobKey> blobKeyFuture = taskManagerFuture.thenCompose(new ApplyFunction<JobManagerMessages.TaskManagerInstance, Future<BlobKey>>() {
        @Override
        public Future<BlobKey> apply(JobManagerMessages.TaskManagerInstance value) {
          Instance taskManager = value.instance().get();
          if (serveLogFile) {
            return taskManager.getTaskManagerGateway().requestTaskManagerLog(timeTimeout);
          } else {
            return taskManager.getTaskManagerGateway().requestTaskManagerStdout(timeTimeout);
          }
        }
      });
      Future<String> logPathFuture = blobKeyFuture.thenCombine(cache, new BiFunction<BlobKey, BlobCache, Tuple2<BlobKey, BlobCache>>() {
        @Override
        public Tuple2<BlobKey, BlobCache> apply(BlobKey blobKey, BlobCache blobCache) {
          return Tuple2.of(blobKey, blobCache);
        }
      }).thenComposeAsync(new ApplyFunction<Tuple2<BlobKey, BlobCache>, Future<String>>() {
        @Override
        public Future<String> apply(Tuple2<BlobKey, BlobCache> value) {
          final BlobKey blobKey = value.f0;
          final BlobCache blobCache = value.f1;
          // Delete the previous log file if it differs from the current one.
          HashMap<String, BlobKey> lastSubmittedFile = serveLogFile ? lastSubmittedLog : lastSubmittedStdout;
          if (lastSubmittedFile.containsKey(taskManagerID)) {
            if (!blobKey.equals(lastSubmittedFile.get(taskManagerID))) {
              try {
                blobCache.deleteGlobal(lastSubmittedFile.get(taskManagerID));
              } catch (IOException e) {
                return FlinkCompletableFuture.completedExceptionally(new Exception("Could not delete file for " + taskManagerID + '.', e));
              }
              lastSubmittedFile.put(taskManagerID, blobKey);
            }
          } else {
            lastSubmittedFile.put(taskManagerID, blobKey);
          }
          try {
            return FlinkCompletableFuture.completed(blobCache.getURL(blobKey).getFile());
          } catch (IOException e) {
            return FlinkCompletableFuture.completedExceptionally(new Exception("Could not retrieve blob for " + blobKey + '.', e));
          }
        }
      }, executor);
      logPathFuture.exceptionally(new ApplyFunction<Throwable, Void>() {
        @Override
        public Void apply(Throwable failure) {
          display(ctx, request, "Fetching TaskManager log failed.");
          LOG.error("Fetching TaskManager log failed.", failure);
          lastRequestPending.remove(taskManagerID);
          return null;
        }
      });
      logPathFuture.thenAccept(new AcceptFunction<String>() {
        @Override
        public void accept(String filePath) {
          File file = new File(filePath);
          final RandomAccessFile raf;
          try {
            raf = new RandomAccessFile(file, "r");
          } catch (FileNotFoundException e) {
            display(ctx, request, "Displaying TaskManager log failed.");
            LOG.error("Displaying TaskManager log failed.", e);
            return;
          }
          long fileLength;
          try {
            fileLength = raf.length();
          } catch (IOException ioe) {
            display(ctx, request, "Displaying TaskManager log failed.");
            LOG.error("Displaying TaskManager log failed.", ioe);
            try {
              raf.close();
            } catch (IOException e) {
              LOG.error("Could not close random access file.", e);
            }
            return;
          }
          final FileChannel fc = raf.getChannel();
          HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
          response.headers().set(CONTENT_TYPE, "text/plain");
          if (HttpHeaders.isKeepAlive(request)) {
            response.headers().set(CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
          }
          HttpHeaders.setContentLength(response, fileLength);
          // Write the initial line and the header.
          ctx.write(response);
          // Write the content.
          ChannelFuture lastContentFuture;
          final GenericFutureListener<io.netty.util.concurrent.Future<? super Void>> completionListener =
            new GenericFutureListener<io.netty.util.concurrent.Future<? super Void>>() {
              @Override
              public void operationComplete(io.netty.util.concurrent.Future<? super Void> future) throws Exception {
                lastRequestPending.remove(taskManagerID);
                fc.close();
                raf.close();
              }
            };
          if (ctx.pipeline().get(SslHandler.class) == null) {
            ctx.write(new DefaultFileRegion(fc, 0, fileLength), ctx.newProgressivePromise()).addListener(completionListener);
            lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
          } else {
            try {
              lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)), ctx.newProgressivePromise()).addListener(completionListener);
            } catch (IOException e) {
              display(ctx, request, "Displaying TaskManager log failed.");
              LOG.warn("Could not write http data.", e);
              return;
            }
            // HttpChunkedInput will write the end marker (LastHttpContent) for us.
          }
          // Close the connection if keep-alive is not requested.
          if (!HttpHeaders.isKeepAlive(request)) {
            lastContentFuture.addListener(ChannelFutureListener.CLOSE);
          }
        }
      });
    } catch (Exception e) {
      display(ctx, request, "Error: " + e.getMessage());
      LOG.error("Fetching TaskManager log failed.", e);
      lastRequestPending.remove(taskManagerID);
    }
  } else {
    display(ctx, request, "loading...");
  }
}
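Stripped of the Flink-specific future plumbing, the Netty core of this handler is: write the file (zero-copy when no SSL handler is present) and attach a listener to the write future so the file handle is released only after the transfer completes. A simplified, hypothetical sketch of just that pattern (FileWriteWithCleanupSketch is an illustrative name, not Flink code):

import java.io.File;
import java.io.RandomAccessFile;

import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.DefaultFileRegion;

// Simplified sketch: write a file with zero-copy and use a listener on the
// write future to release the file handle only after the transfer has finished.
public final class FileWriteWithCleanupSketch {

  public static void writeFileAndCleanUp(ChannelHandlerContext ctx, File file) throws Exception {
    final RandomAccessFile raf = new RandomAccessFile(file, "r");
    final long fileLength = raf.length();
    ChannelFuture writeFuture =
        ctx.writeAndFlush(new DefaultFileRegion(raf.getChannel(), 0, fileLength));
    writeFuture.addListener((ChannelFutureListener) future -> {
      // Runs when the transfer is done (successfully or not); the file handle
      // must stay open until this point, so it is closed here.
      raf.close();
      if (!future.isSuccess()) {
        ctx.close();
      }
    });
  }
}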
Use of io.netty.util.concurrent.GenericFutureListener in project MinecraftForge by MinecraftForge: class NetworkDispatcher, method kickWithMessage.
private void kickWithMessage(String message) {
  FMLLog.log(Level.ERROR, "Network Disconnect: %s", message);
  final TextComponentString TextComponentString = new TextComponentString(message);
  if (side == Side.CLIENT) {
    manager.closeChannel(TextComponentString);
  } else {
    manager.sendPacket(new SPacketDisconnect(TextComponentString), new GenericFutureListener<Future<? super Void>>() {
      @Override
      public void operationComplete(Future<? super Void> result) {
        manager.closeChannel(TextComponentString);
      }
    }, (GenericFutureListener<? extends Future<? super Void>>[]) null);
  }
  manager.channel().config().setAutoRead(false);
}
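In plain Netty terms, the server branch boils down to: flush the final packet, close the channel from the write listener so the peer actually receives the disconnect reason, then stop reading further input. A minimal, hypothetical sketch of that idea (SendThenCloseSketch is an illustrative name; Netty also ships ChannelFutureListener.CLOSE for the close-on-complete step):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

// Hypothetical sketch of the "send, then close" pattern: close the channel only
// after the final message has been written, so the peer sees the reason.
public final class SendThenCloseSketch {

  public static void sendFinalMessageAndClose(Channel channel, Object disconnectMessage) {
    channel.writeAndFlush(disconnectMessage)
        .addListener((ChannelFutureListener) future -> future.channel().close());
    // Stop reading further input; nothing useful can arrive after a kick.
    channel.config().setAutoRead(false);
  }
}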