
Example 6 with GenericFutureListener

Use of io.netty.util.concurrent.GenericFutureListener in project riposte by Nike-Inc.

From the class ChannelPipelineFinalizerHandlerTest, method finalizeChannelPipeline_should_send_event_to_metricsListener_for_successful_response_and_flush_context.

@Test
public void finalizeChannelPipeline_should_send_event_to_metricsListener_for_successful_response_and_flush_context() throws Exception {
    // given
    ChannelFuture responseWriterChannelFuture = mock(ChannelFuture.class);
    state.setResponseWriterFinalChunkChannelFuture(responseWriterChannelFuture);
    HttpProcessingState stateSpy = spy(state);
    doReturn(stateSpy).when(stateAttributeMock).get();
    ChannelFuture responseWriteFutureResult = mock(ChannelFuture.class);
    doReturn(true).when(responseWriteFutureResult).isSuccess();
    Assertions.assertThat(stateSpy.isRequestMetricsRecordedOrScheduled()).isFalse();
    // when
    handler.finalizeChannelPipeline(ctxMock, null, stateSpy, null);
    // then
    ArgumentCaptor<GenericFutureListener> channelFutureListenerArgumentCaptor = ArgumentCaptor.forClass(GenericFutureListener.class);
    verify(responseWriterChannelFuture).addListener(channelFutureListenerArgumentCaptor.capture());
    GenericFutureListener futureListener = channelFutureListenerArgumentCaptor.getValue();
    assertThat(futureListener, notNullValue());
    futureListener.operationComplete(responseWriteFutureResult);
    verify(metricsListenerMock).onEvent(eq(ServerMetricsEvent.RESPONSE_SENT), any(HttpProcessingState.class));
    verify(ctxMock).flush();
    Assertions.assertThat(stateSpy.isRequestMetricsRecordedOrScheduled()).isTrue();
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) HttpProcessingState(com.nike.riposte.server.http.HttpProcessingState) GenericFutureListener(io.netty.util.concurrent.GenericFutureListener) Test(org.junit.Test)
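
GenericFutureListener is a single-method callback, so the test above boils down to capturing whatever listener the handler attached to the response-write future and invoking it by hand with a mocked result future. The production-side pattern being verified looks roughly like the sketch below; the MetricsListener type and its method are hypothetical stand-ins for riposte's ServerMetricsEvent plumbing, not the actual ChannelPipelineFinalizerHandler code.

import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;

class ResponseSentMetricsSketch {

    // Hypothetical stand-in for the metrics hook the handler notifies.
    interface MetricsListener {
        void responseSent(boolean writeSucceeded);
    }

    // Fire the "response sent" event only once the final chunk write has completed,
    // successfully or not, then flush the context.
    static void finalizeResponse(ChannelHandlerContext ctx,
                                 ChannelFuture finalChunkWriteFuture,
                                 MetricsListener metricsListener) {
        finalChunkWriteFuture.addListener(new GenericFutureListener<Future<? super Void>>() {
            @Override
            public void operationComplete(Future<? super Void> future) {
                metricsListener.responseSent(future.isSuccess());
            }
        });
        ctx.flush();
    }
}

The test exercises exactly this shape: it captures the listener with an ArgumentCaptor, calls operationComplete with a future whose isSuccess() returns true, and then checks that the metrics event fired and the context was flushed.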

Example 7 with GenericFutureListener

Use of io.netty.util.concurrent.GenericFutureListener in project riposte by Nike-Inc.

From the class OpenChannelLimitHandlerTest, method doChannelActive_marks_and_schedules_double_check_timeout_if_too_many_open_channels.

@DataProvider(value = { "0", "1" })
@Test
public void doChannelActive_marks_and_schedules_double_check_timeout_if_too_many_open_channels(int numOpenChannelsGreaterThanMax) throws Exception {
    // given
    int actualOpenChannels = maxOpenChannelsThreshold + numOpenChannelsGreaterThanMax;
    setActualOpenChannels(actualOpenChannels);
    // when
    PipelineContinuationBehavior result = handler.doChannelActive(ctxMock);
    // then
    assertThat(result).isEqualTo(CONTINUE);
    Pair<Runnable, GenericFutureListener> futureInfoPair = extractDoubleCheckRunnableAndCloseFutureListener();
    verify(tooManyOpenConnectionsAttributeMock).set(actualOpenChannels);
    verifyDoubleCheckFuture(futureInfoPair.getLeft());
    verifyCloseFutureListener(futureInfoPair.getRight());
    verify(channelGroupMock, never()).add(channelMock);
}
Also used : PipelineContinuationBehavior(com.nike.riposte.server.handler.base.PipelineContinuationBehavior) GenericFutureListener(io.netty.util.concurrent.GenericFutureListener) DataProvider(com.tngtech.java.junit.dataprovider.DataProvider) Test(org.junit.Test)
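
The handler under test reacts to channelActive by checking the open-channel count: under the limit it tracks the channel in a ChannelGroup, over the limit it marks the channel, schedules a double-check, and closes it without ever tracking it. A reduced sketch of that over-limit branch, using hypothetical attribute and parameter names rather than riposte's actual OpenChannelLimitHandler internals:

import io.netty.channel.Channel;
import io.netty.channel.group.ChannelGroup;
import io.netty.util.AttributeKey;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;

class OpenChannelLimitSketch {

    // Hypothetical attribute recording how many channels were open when this one was refused.
    static final AttributeKey<Integer> TOO_MANY_OPEN_CONNECTIONS =
            AttributeKey.valueOf("tooManyOpenConnections");

    static void onChannelActive(Channel channel, ChannelGroup openChannels, int maxOpenChannels) {
        int actualOpenChannels = openChannels.size();
        if (actualOpenChannels >= maxOpenChannels) {
            // Over the limit: record the count, close the channel, never add it to the group.
            channel.attr(TOO_MANY_OPEN_CONNECTIONS).set(actualOpenChannels);
            channel.close().addListener(new GenericFutureListener<Future<? super Void>>() {
                @Override
                public void operationComplete(Future<? super Void> future) {
                    // Close-future listeners fire whether the close succeeded or failed.
                }
            });
        } else {
            openChannels.add(channel);
        }
    }
}

The test asserts the same observable effects: the "too many open connections" attribute is set to the actual count, a close-future listener and a double-check Runnable are registered, and the channel is never added to the channel group.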

Example 8 with GenericFutureListener

Use of io.netty.util.concurrent.GenericFutureListener in project riposte by Nike-Inc.

From the class DTraceEndHandlerTest, method endDtrace_completes_the_trace_using_ChannelFutureListener_if_state_is_not_null_and_isResponseSendingLastChunkSent_returns_true.

@Test
public void endDtrace_completes_the_trace_using_ChannelFutureListener_if_state_is_not_null_and_isResponseSendingLastChunkSent_returns_true() throws Exception {
    // given
    assertThat(state.isTraceCompletedOrScheduled(), is(false));
    assertThat(state.isResponseSendingLastChunkSent(), is(true));
    assertThat(state.getDistributedTraceStack(), nullValue());
    Pair<Deque<Span>, Map<String, String>> expectedDtraceInfo = setupStateWithNewSpan("blahTrace");
    assertThat(state.getDistributedTraceStack(), notNullValue());
    assertThat(state.getDistributedTraceStack(), is(expectedDtraceInfo.getLeft()));
    assertThat(state.getDistributedTraceStack().size(), is(1));
    Span expectedSpan = expectedDtraceInfo.getLeft().peek();
    // when
    handlerSpy.endDtrace(ctxMock);
    // then
    // completeCurrentSpan() not immediately called, but scheduled
    verify(handlerSpy, never()).completeCurrentSpan();
    assertThat(state.isTraceCompletedOrScheduled(), is(true));
    // Extract the listener that was attached to the last chunk future.
    GenericFutureListener lastChunkListener = extractChannelFutureListenerAddedToLastChunkFuture();
    assertThat(lastChunkListener, notNullValue());
    assertThat(lastChunkListener, instanceOf(ChannelFutureListenerWithTracingAndMdc.class));
    assertThat(Whitebox.getInternalState(lastChunkListener, "distributedTraceStackForExecution"), is(expectedDtraceInfo.getLeft()));
    assertThat(Whitebox.getInternalState(lastChunkListener, "mdcContextMapForExecution"), is(expectedDtraceInfo.getRight()));
    Consumer<ChannelFuture> embeddedListenerConsumer = (Consumer<ChannelFuture>) Whitebox.getInternalState(lastChunkListener, "postCompleteOperation");
    // Execute the embedded listener so we can validate what it does. Note that we can't verify using mockito spy verify(),
    //      because the method call goes through the internal handler, not the spy impl. But we can still verify by
    //      setting up the Tracer state to what we expect, execute the embedded listener, and verify subsequent Tracer state.
    AsyncNettyHelper.linkTracingAndMdcToCurrentThread(expectedDtraceInfo);
    assertThat(Tracer.getInstance().getCurrentSpan(), is(expectedSpan));
    embeddedListenerConsumer.accept(null);
    assertThat(Tracer.getInstance().getCurrentSpan(), nullValue());
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) Consumer(java.util.function.Consumer) ChannelFutureListenerWithTracingAndMdc(com.nike.riposte.util.asynchelperwrapper.ChannelFutureListenerWithTracingAndMdc) Deque(java.util.Deque) Map(java.util.Map) Span(com.nike.wingtips.Span) GenericFutureListener(io.netty.util.concurrent.GenericFutureListener) Test(org.junit.Test)
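
The listener type asserted on here wraps a plain callback so that the distributed-trace stack and MDC context captured at construction time are restored on whatever thread eventually runs operationComplete. A generic sketch of that wrapping idea follows; it is illustrative only, carries just the MDC half (not the wingtips trace stack), and is not Riposte's actual ChannelFutureListenerWithTracingAndMdc.

import java.util.Map;
import java.util.function.Consumer;

import org.slf4j.MDC;

import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;

class MdcAwareListenerSketch implements GenericFutureListener<Future<? super Void>> {

    private final Map<String, String> mdcContextMapForExecution;
    private final Consumer<Future<? super Void>> postCompleteOperation;

    MdcAwareListenerSketch(Consumer<Future<? super Void>> postCompleteOperation) {
        // Capture the calling thread's MDC when the listener is created...
        this.mdcContextMapForExecution = MDC.getCopyOfContextMap();
        this.postCompleteOperation = postCompleteOperation;
    }

    @Override
    public void operationComplete(Future<? super Void> future) {
        Map<String, String> previous = MDC.getCopyOfContextMap();
        try {
            // ...and restore it on the (typically Netty event loop) thread running the callback.
            if (mdcContextMapForExecution != null) {
                MDC.setContextMap(mdcContextMapForExecution);
            }
            postCompleteOperation.accept(future);
        } finally {
            // Put the callback thread's original MDC back so context does not leak between tasks.
            if (previous != null) {
                MDC.setContextMap(previous);
            } else {
                MDC.clear();
            }
        }
    }
}

This is why the test extracts the embedded postCompleteOperation and runs it after linking the expected tracing/MDC state to the current thread: the behavior of interest is what that callback does under the restored context.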

Example 9 with GenericFutureListener

Use of io.netty.util.concurrent.GenericFutureListener in project vert.x by eclipse.

From the class VertxImpl, method deleteCacheDirAndShutdown.

@SuppressWarnings("unchecked")
private void deleteCacheDirAndShutdown(Handler<AsyncResult<Void>> completionHandler) {
    fileResolver.close(res -> {
        workerPool.close();
        internalBlockingPool.close();
        new ArrayList<>(namedWorkerPools.values()).forEach(WorkerPool::close);
        acceptorEventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).addListener(new GenericFutureListener() {

            @Override
            public void operationComplete(io.netty.util.concurrent.Future future) throws Exception {
                if (!future.isSuccess()) {
                    log.warn("Failure in shutting down acceptor event loop group", future.cause());
                }
                eventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).addListener(new GenericFutureListener() {

                    @Override
                    public void operationComplete(io.netty.util.concurrent.Future future) throws Exception {
                        if (!future.isSuccess()) {
                            log.warn("Failure in shutting down event loop group", future.cause());
                        }
                        if (metrics != null) {
                            metrics.close();
                        }
                        checker.close();
                        if (completionHandler != null) {
                            eventLoopThreadFactory.newThread(() -> {
                                completionHandler.handle(Future.succeededFuture());
                            }).start();
                        }
                    }
                });
            }
        });
    });
}
Also used : Future(io.vertx.core.Future) GenericFutureListener(io.netty.util.concurrent.GenericFutureListener)
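
GenericFutureListener has a single abstract method, so on Java 8+ the nested anonymous classes above can be collapsed into lambdas while keeping the same chaining: shut the acceptor group down first so no new connections arrive, then shut down the event loop group, logging any failure along the way. A sketch of that shape with hypothetical group names, not Vert.x's actual fields:

import java.util.concurrent.TimeUnit;

import io.netty.channel.EventLoopGroup;

class ShutdownChainSketch {

    // Sequential graceful shutdown: acceptor group first, then the worker/event loop group.
    static void shutdownSequentially(EventLoopGroup acceptorGroup, EventLoopGroup workerGroup) {
        acceptorGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).addListener(acceptorDone -> {
            if (!acceptorDone.isSuccess()) {
                acceptorDone.cause().printStackTrace();
            }
            workerGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).addListener(workerDone -> {
                if (!workerDone.isSuccess()) {
                    workerDone.cause().printStackTrace();
                }
            });
        });
    }
}

Chaining via listeners rather than blocking on each shutdown future keeps the whole teardown asynchronous; the Vert.x code above additionally hands the final completionHandler to a freshly created thread, presumably so it does not run on an event loop that is itself shutting down.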

Example 10 with GenericFutureListener

Use of io.netty.util.concurrent.GenericFutureListener in project flink by apache.

From the class TaskManagerLogHandler, method respondAsLeader.

/**
	 * Response when running with leading JobManager.
	 */
@Override
protected void respondAsLeader(final ChannelHandlerContext ctx, final Routed routed, final ActorGateway jobManager) {
    if (cache == null) {
        scala.concurrent.Future<Object> portFuture = jobManager.ask(JobManagerMessages.getRequestBlobManagerPort(), timeout);
        scala.concurrent.Future<BlobCache> cacheFuture = portFuture.map(new Mapper<Object, BlobCache>() {

            @Override
            public BlobCache checkedApply(Object result) throws IOException {
                Option<String> hostOption = jobManager.actor().path().address().host();
                String host = hostOption.isDefined() ? hostOption.get() : "localhost";
                int port = (int) result;
                return new BlobCache(new InetSocketAddress(host, port), config);
            }
        }, executor);
        cache = new FlinkFuture<>(cacheFuture);
    }
    final String taskManagerID = routed.pathParams().get(TaskManagersHandler.TASK_MANAGER_ID_KEY);
    final HttpRequest request = routed.request();
    //fetch TaskManager logs if no other process is currently doing it
    if (lastRequestPending.putIfAbsent(taskManagerID, true) == null) {
        try {
            InstanceID instanceID = new InstanceID(StringUtils.hexStringToByte(taskManagerID));
            scala.concurrent.Future<JobManagerMessages.TaskManagerInstance> scalaTaskManagerFuture = jobManager.ask(new JobManagerMessages.RequestTaskManagerInstance(instanceID), timeout).mapTo(ClassTag$.MODULE$.<JobManagerMessages.TaskManagerInstance>apply(JobManagerMessages.TaskManagerInstance.class));
            Future<JobManagerMessages.TaskManagerInstance> taskManagerFuture = new FlinkFuture<>(scalaTaskManagerFuture);
            Future<BlobKey> blobKeyFuture = taskManagerFuture.thenCompose(new ApplyFunction<JobManagerMessages.TaskManagerInstance, Future<BlobKey>>() {

                @Override
                public Future<BlobKey> apply(JobManagerMessages.TaskManagerInstance value) {
                    Instance taskManager = value.instance().get();
                    if (serveLogFile) {
                        return taskManager.getTaskManagerGateway().requestTaskManagerLog(timeTimeout);
                    } else {
                        return taskManager.getTaskManagerGateway().requestTaskManagerStdout(timeTimeout);
                    }
                }
            });
            Future<String> logPathFuture = blobKeyFuture.thenCombine(cache, new BiFunction<BlobKey, BlobCache, Tuple2<BlobKey, BlobCache>>() {

                @Override
                public Tuple2<BlobKey, BlobCache> apply(BlobKey blobKey, BlobCache blobCache) {
                    return Tuple2.of(blobKey, blobCache);
                }
            }).thenComposeAsync(new ApplyFunction<Tuple2<BlobKey, BlobCache>, Future<String>>() {

                @Override
                public Future<String> apply(Tuple2<BlobKey, BlobCache> value) {
                    final BlobKey blobKey = value.f0;
                    final BlobCache blobCache = value.f1;
                    //delete previous log file, if it is different than the current one
                    HashMap<String, BlobKey> lastSubmittedFile = serveLogFile ? lastSubmittedLog : lastSubmittedStdout;
                    if (lastSubmittedFile.containsKey(taskManagerID)) {
                        if (!blobKey.equals(lastSubmittedFile.get(taskManagerID))) {
                            try {
                                blobCache.deleteGlobal(lastSubmittedFile.get(taskManagerID));
                            } catch (IOException e) {
                                return FlinkCompletableFuture.completedExceptionally(new Exception("Could not delete file for " + taskManagerID + '.', e));
                            }
                            lastSubmittedFile.put(taskManagerID, blobKey);
                        }
                    } else {
                        lastSubmittedFile.put(taskManagerID, blobKey);
                    }
                    try {
                        return FlinkCompletableFuture.completed(blobCache.getURL(blobKey).getFile());
                    } catch (IOException e) {
                        return FlinkCompletableFuture.completedExceptionally(new Exception("Could not retrieve blob for " + blobKey + '.', e));
                    }
                }
            }, executor);
            logPathFuture.exceptionally(new ApplyFunction<Throwable, Void>() {

                @Override
                public Void apply(Throwable failure) {
                    display(ctx, request, "Fetching TaskManager log failed.");
                    LOG.error("Fetching TaskManager log failed.", failure);
                    lastRequestPending.remove(taskManagerID);
                    return null;
                }
            });
            logPathFuture.thenAccept(new AcceptFunction<String>() {

                @Override
                public void accept(String filePath) {
                    File file = new File(filePath);
                    final RandomAccessFile raf;
                    try {
                        raf = new RandomAccessFile(file, "r");
                    } catch (FileNotFoundException e) {
                        display(ctx, request, "Displaying TaskManager log failed.");
                        LOG.error("Displaying TaskManager log failed.", e);
                        return;
                    }
                    long fileLength;
                    try {
                        fileLength = raf.length();
                    } catch (IOException ioe) {
                        display(ctx, request, "Displaying TaskManager log failed.");
                        LOG.error("Displaying TaskManager log failed.", ioe);
                        try {
                            raf.close();
                        } catch (IOException e) {
                            LOG.error("Could not close random access file.", e);
                        }
                        return;
                    }
                    final FileChannel fc = raf.getChannel();
                    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
                    response.headers().set(CONTENT_TYPE, "text/plain");
                    if (HttpHeaders.isKeepAlive(request)) {
                        response.headers().set(CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
                    }
                    HttpHeaders.setContentLength(response, fileLength);
                    // write the initial line and the header.
                    ctx.write(response);
                    // write the content.
                    ChannelFuture lastContentFuture;
                    final GenericFutureListener<io.netty.util.concurrent.Future<? super Void>> completionListener = new GenericFutureListener<io.netty.util.concurrent.Future<? super Void>>() {

                        @Override
                        public void operationComplete(io.netty.util.concurrent.Future<? super Void> future) throws Exception {
                            lastRequestPending.remove(taskManagerID);
                            fc.close();
                            raf.close();
                        }
                    };
                    if (ctx.pipeline().get(SslHandler.class) == null) {
                        ctx.write(new DefaultFileRegion(fc, 0, fileLength), ctx.newProgressivePromise()).addListener(completionListener);
                        lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
                    } else {
                        try {
                            lastContentFuture = ctx.writeAndFlush(new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)), ctx.newProgressivePromise()).addListener(completionListener);
                        } catch (IOException e) {
                            display(ctx, request, "Displaying TaskManager log failed.");
                            LOG.warn("Could not write http data.", e);
                            return;
                        }
                    // HttpChunkedInput will write the end marker (LastHttpContent) for us.
                    }
                    // close the connection, if no keep-alive is needed
                    if (!HttpHeaders.isKeepAlive(request)) {
                        lastContentFuture.addListener(ChannelFutureListener.CLOSE);
                    }
                }
            });
        } catch (Exception e) {
            display(ctx, request, "Error: " + e.getMessage());
            LOG.error("Fetching TaskManager log failed.", e);
            lastRequestPending.remove(taskManagerID);
        }
    } else {
        display(ctx, request, "loading...");
    }
}
Also used : InstanceID(org.apache.flink.runtime.instance.InstanceID) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) FileNotFoundException(java.io.FileNotFoundException) FlinkFuture(org.apache.flink.runtime.concurrent.impl.FlinkFuture) BlobKey(org.apache.flink.runtime.blob.BlobKey) GenericFutureListener(io.netty.util.concurrent.GenericFutureListener) ChannelFuture(io.netty.channel.ChannelFuture) ChunkedFile(io.netty.handler.stream.ChunkedFile) JobManagerMessages(org.apache.flink.runtime.messages.JobManagerMessages) DefaultFileRegion(io.netty.channel.DefaultFileRegion) RandomAccessFile(java.io.RandomAccessFile) Option(scala.Option) RandomAccessFile(java.io.RandomAccessFile) ChunkedFile(io.netty.handler.stream.ChunkedFile) File(java.io.File) Instance(org.apache.flink.runtime.instance.Instance) BlobCache(org.apache.flink.runtime.blob.BlobCache) HttpChunkedInput(io.netty.handler.codec.http.HttpChunkedInput) HttpRequest(io.netty.handler.codec.http.HttpRequest) FileChannel(java.nio.channels.FileChannel) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) HttpResponse(io.netty.handler.codec.http.HttpResponse) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) BiFunction(org.apache.flink.runtime.concurrent.BiFunction) Tuple2(org.apache.flink.api.java.tuple.Tuple2) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) FlinkFuture(org.apache.flink.runtime.concurrent.impl.FlinkFuture) Future(org.apache.flink.runtime.concurrent.Future) FlinkCompletableFuture(org.apache.flink.runtime.concurrent.impl.FlinkCompletableFuture) ChannelFuture(io.netty.channel.ChannelFuture)
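
Stripped of the Flink-specific plumbing, the GenericFutureListener here serves one purpose: release the RandomAccessFile and its FileChannel once Netty has finished (or failed) sending the file. A minimal sketch of the non-SSL branch, with hypothetical names and none of the HTTP header or error handling above:

import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.DefaultFileRegion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;

class FileWriteCleanupSketch {

    // Zero-copy file write whose completion listener closes the underlying handles,
    // whether the transfer succeeded or failed.
    static void writeFile(ChannelHandlerContext ctx, RandomAccessFile raf, long fileLength) {
        FileChannel fc = raf.getChannel();
        GenericFutureListener<Future<? super Void>> cleanup = future -> {
            // operationComplete is declared to throw Exception, so the checked IOExceptions are fine here.
            fc.close();
            raf.close();
        };
        ctx.write(new DefaultFileRegion(fc, 0, fileLength), ctx.newProgressivePromise())
           .addListener(cleanup);
        ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
    }
}

The SSL branch in the original cannot use DefaultFileRegion, because SslHandler needs the bytes in user space to encrypt them and zero-copy transfer bypasses that; hence the fallback to HttpChunkedInput over a ChunkedFile, with the same completion listener attached.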

Aggregations

GenericFutureListener (io.netty.util.concurrent.GenericFutureListener): 15
Test (org.junit.Test): 7
ChannelFuture (io.netty.channel.ChannelFuture): 6
Future (io.netty.util.concurrent.Future): 4
HttpProcessingState (com.nike.riposte.server.http.HttpProcessingState): 2
Bootstrap (io.netty.bootstrap.Bootstrap): 2
ByteBuf (io.netty.buffer.ByteBuf): 2
HttpChunkedInput (io.netty.handler.codec.http.HttpChunkedInput): 2
HttpRequest (io.netty.handler.codec.http.HttpRequest): 2
IOException (java.io.IOException): 2
InetSocketAddress (java.net.InetSocketAddress): 2
Map (java.util.Map): 2
PipelineContinuationBehavior (com.nike.riposte.server.handler.base.PipelineContinuationBehavior): 1
ChannelFutureListenerWithTracingAndMdc (com.nike.riposte.util.asynchelperwrapper.ChannelFutureListenerWithTracingAndMdc): 1
Span (com.nike.wingtips.Span): 1
DataProvider (com.tngtech.java.junit.dataprovider.DataProvider): 1
ConnectionState (io.github.tesla.gateway.netty.transmit.ConnectionState): 1
ServerBootstrap (io.netty.bootstrap.ServerBootstrap): 1
Channel (io.netty.channel.Channel): 1
ChannelFutureListener (io.netty.channel.ChannelFutureListener): 1