use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project vert.x by eclipse.
the class HttpServerHandlerBenchmark method setup.
@Setup
public void setup() {
  vertx = (VertxInternal) Vertx.vertx();
  HttpServerOptions options = new HttpServerOptions();
  vertxChannel = new EmbeddedChannel(
      new VertxHttpRequestDecoder(options),
      // We don't use the VertxHttpResponseEncoder because it will use the PartialPooledByteBufAllocator
      new HttpResponseEncoder() {
        @Override
        protected void encodeHeaders(HttpHeaders headers, ByteBuf buf) {
          ((HeadersMultiMap) headers).encode(buf);
        }
      });
  vertxChannel.config().setAllocator(new Alloc());
  ContextInternal context = vertx.createEventLoopContext(vertxChannel.eventLoop(), null, Thread.currentThread().getContextClassLoader());
  Handler<HttpServerRequest> app = request -> {
    HttpServerResponse response = request.response();
    MultiMap headers = response.headers();
    headers.add(HEADER_CONTENT_TYPE, RESPONSE_TYPE_PLAIN)
           .add(HEADER_SERVER, SERVER)
           .add(HEADER_DATE, DATE_STRING)
           .add(HEADER_CONTENT_LENGTH, HELLO_WORLD_LENGTH);
    response.end(HELLO_WORLD_BUFFER);
  };
  VertxHandler<Http1xServerConnection> handler = VertxHandler.create(chctx -> {
    Http1xServerConnection conn = new Http1xServerConnection(() -> context, null, new HttpServerOptions(), chctx, context, "localhost", null);
    conn.handler(app);
    return conn;
  });
  vertxChannel.pipeline().addLast("handler", handler);
  nettyChannel = new EmbeddedChannel(
      new HttpRequestDecoder(options.getMaxInitialLineLength(), options.getMaxHeaderSize(), options.getMaxChunkSize(), false, options.getDecoderInitialBufferSize()),
      new HttpResponseEncoder(),
      new SimpleChannelInboundHandler<HttpRequest>() {
        private final byte[] STATIC_PLAINTEXT = "Hello, World!".getBytes(CharsetUtil.UTF_8);
        private final int STATIC_PLAINTEXT_LEN = STATIC_PLAINTEXT.length;
        private final ByteBuf PLAINTEXT_CONTENT_BUFFER = Unpooled.unreleasableBuffer(Unpooled.directBuffer().writeBytes(STATIC_PLAINTEXT));
        private final CharSequence PLAINTEXT_CLHEADER_VALUE = new AsciiString(String.valueOf(STATIC_PLAINTEXT_LEN));
        private final CharSequence TYPE_PLAIN = new AsciiString("text/plain");
        private final CharSequence SERVER_NAME = new AsciiString("Netty");
        private final CharSequence CONTENT_TYPE_ENTITY = HttpHeaderNames.CONTENT_TYPE;
        private final CharSequence DATE_ENTITY = HttpHeaderNames.DATE;
        private final CharSequence CONTENT_LENGTH_ENTITY = HttpHeaderNames.CONTENT_LENGTH;
        private final CharSequence SERVER_ENTITY = HttpHeaderNames.SERVER;
        private final DateFormat FORMAT = new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss z");
        private final CharSequence date = new AsciiString(FORMAT.format(new Date()));

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, HttpRequest msg) throws Exception {
          writeResponse(ctx, msg, PLAINTEXT_CONTENT_BUFFER.duplicate(), TYPE_PLAIN, PLAINTEXT_CLHEADER_VALUE);
        }

        @Override
        public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
          ctx.flush();
        }

        private void writeResponse(ChannelHandlerContext ctx, HttpRequest request, ByteBuf buf, CharSequence contentType, CharSequence contentLength) {
          // Build the response object.
          FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, buf, false);
          HttpHeaders headers = response.headers();
          headers.set(CONTENT_TYPE_ENTITY, contentType);
          headers.set(SERVER_ENTITY, SERVER_NAME);
          headers.set(DATE_ENTITY, date);
          headers.set(CONTENT_LENGTH_ENTITY, contentLength);
          // Write without flushing; the flush happens in channelReadComplete.
          ctx.write(response, ctx.voidPromise());
        }
      });
  nettyChannel.config().setAllocator(new Alloc());
  GET = Unpooled.unreleasableBuffer(Unpooled.copiedBuffer(("GET / HTTP/1.1\r\n" + "\r\n").getBytes()));
  readerIndex = GET.readerIndex();
  writeIndex = GET.writerIndex();
}
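A hedged sketch of how a JMH benchmark method might then replay the prepared request through the embedded pipeline. The method name, the return-value style, and the outbound draining below are assumptions for illustration, not the project's confirmed benchmark code:

@Benchmark
public Object vertx() {
  // Rewind the shared, unreleasable request buffer so it can be replayed on every invocation.
  GET.setIndex(readerIndex, writeIndex);
  vertxChannel.writeInbound(GET);
  // Return the encoded response so JMH cannot dead-code-eliminate the work.
  return vertxChannel.outboundMessages().poll();
}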
use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project proxyee-down by monkeyWie.
the class EmbedHttpServer method start.
public void start(GenericFutureListener startedListener) {
  NioEventLoopGroup bossGroup = new NioEventLoopGroup(2);
  NioEventLoopGroup workGroup = new NioEventLoopGroup(2);
  try {
    ServerBootstrap bootstrap = new ServerBootstrap()
        .group(bossGroup, workGroup)
        .channel(NioServerSocketChannel.class)
        .childHandler(new ChannelInitializer<Channel>() {
          @Override
          protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addLast("httpCodec", new HttpServerCodec());
            ch.pipeline().addLast(new HttpObjectAggregator(4194304));
            ch.pipeline().addLast("serverHandle", new SimpleChannelInboundHandler<FullHttpRequest>() {
              @Override
              protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) throws Exception {
                URI uri = new URI(request.uri());
                FullHttpResponse httpResponse = invoke(uri.getPath(), ctx.channel(), request);
                if (httpResponse != null) {
                  httpResponse.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
                  httpResponse.headers().set(HttpHeaderNames.CONTENT_LENGTH, httpResponse.content().readableBytes());
                  ch.writeAndFlush(httpResponse);
                }
              }

              @Override
              public void channelUnregistered(ChannelHandlerContext ctx) {
                ctx.channel().close();
              }

              @Override
              public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
                // Unwrap the root cause once and reuse it, guarding against a null getCause().
                Throwable rootCause = cause.getCause() == null ? cause : cause.getCause();
                LOGGER.error("native request error", rootCause);
                Map<String, Object> data = new HashMap<>();
                data.put("error", rootCause.toString());
                FullHttpResponse httpResponse = HttpHandlerUtil.buildJson(data);
                httpResponse.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
                ctx.channel().writeAndFlush(httpResponse);
              }
            });
          }
        });
    ChannelFuture f = bootstrap.bind("127.0.0.1", port).sync();
    if (startedListener != null) {
      f.addListener(startedListener);
    }
    f.channel().closeFuture().sync();
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    bossGroup.shutdownGracefully();
    workGroup.shutdownGracefully();
  }
}
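A hedged usage sketch: start(...) blocks on closeFuture().sync(), so a caller would typically run it on its own thread and use the listener as the "server is up" signal. The EmbedHttpServer constructor and port value below are assumptions for illustration:

// Hedged sketch; the constructor signature is assumed, not confirmed from the project.
EmbedHttpServer server = new EmbedHttpServer(26339);
new Thread(() -> server.start(future ->
    System.out.println("embed http server listening on 127.0.0.1"))).start();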
use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project riposte by Nike-Inc.
the class StreamingAsyncHttpClient method prepChannelForDownstreamCall.
protected void prepChannelForDownstreamCall(
    String downstreamHost, int downstreamPort, ChannelPool pool, Channel ch, StreamingCallback callback,
    Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall,
    boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis,
    ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder,
    ProxyRouterProcessingState proxyRouterProcessingState, @Nullable Span spanForDownstreamCall
) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
  ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {
    @Override
    protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) {
      try {
        // Messages that arrive after the call is fully processed should not trigger this behavior a second time.
        if (callActiveHolder.heldObject) {
          if (msg instanceof LastHttpContent) {
            lastChunkSentDownstreamHolder.heldObject = true;
            if (performSubSpanAroundDownstreamCalls) {
              // Complete the subspan.
              runnableWithTracingAndMdc(() -> {
                Span currentSpan = Tracer.getInstance().getCurrentSpan();
                if (proxyRouterProcessingState != null) {
                  proxyRouterProcessingState.handleTracingResponseTaggingAndFinalSpanNameIfNotAlreadyDone(currentSpan);
                }
                if (proxySpanTaggingStrategy.shouldAddWireReceiveFinishAnnotation()) {
                  currentSpan.addTimestampedAnnotationForCurrentTime(proxySpanTaggingStrategy.wireReceiveFinishAnnotationName());
                }
                if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                  Tracer.getInstance().completeRequestSpan();
                else
                  Tracer.getInstance().completeSubSpan();
              }, distributedSpanStackToUse, mdcContextToUse).run();
            }
          }
          HttpObject msgToPass = msg;
          if (msg instanceof HttpResponse) {
            // We can't pass the original HttpResponse back to the callback due to intricacies of how
            // Netty handles determining the last chunk. If we do, and the callback ends up writing
            // the message out to the client (which happens during proxy routing for example), then
            // msg's headers might get modified - potentially causing this channel pipeline to
            // never send a LastHttpContent, which will in turn cause an indefinite hang.
            HttpResponse origHttpResponse = (HttpResponse) msg;
            HttpResponse httpResponse = (msg instanceof FullHttpResponse)
                ? new DefaultFullHttpResponse(origHttpResponse.protocolVersion(), origHttpResponse.status(), ((FullHttpResponse) msg).content())
                : new DefaultHttpResponse(origHttpResponse.protocolVersion(), origHttpResponse.status());
            httpResponse.headers().add(origHttpResponse.headers());
            msgToPass = httpResponse;
            if (proxyRouterProcessingState != null) {
              proxyRouterProcessingState.setProxyHttpResponse(httpResponse);
            }
            if (spanForDownstreamCall != null && proxySpanTaggingStrategy.shouldAddWireReceiveStartAnnotation()) {
              spanForDownstreamCall.addTimestampedAnnotationForCurrentTime(proxySpanTaggingStrategy.wireReceiveStartAnnotationName());
            }
          }
          callback.messageReceived(msgToPass);
        } else {
          if (shouldLogBadMessagesAfterRequestFinishes) {
            runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", msg), distributedSpanStackToUse, mdcContextToUse).run();
          }
        }
      } finally {
        if (msg instanceof LastHttpContent) {
          releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
        }
      }
    }
  };
  Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
    Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
    try {
      // Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
      originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
      // Errors that arrive after the call is fully processed should not trigger this behavior a second time.
      if (callActiveHolder.heldObject) {
        if (proxyRouterProcessingState != null) {
          proxyRouterProcessingState.setProxyError(cause);
        }
        if (performSubSpanAroundDownstreamCalls) {
          Span currentSpan = Tracer.getInstance().getCurrentSpan();
          HttpResponse proxyHttpResponseObj = (proxyRouterProcessingState == null) ? null : proxyRouterProcessingState.getProxyHttpResponse();
          if (currentSpan != null && proxySpanTaggingStrategy.shouldAddErrorAnnotationForCaughtException(proxyHttpResponseObj, cause)) {
            currentSpan.addTimestampedAnnotationForCurrentTime(proxySpanTaggingStrategy.errorAnnotationName(proxyHttpResponseObj, cause));
          }
          if (proxyRouterProcessingState != null) {
            proxyRouterProcessingState.handleTracingResponseTaggingAndFinalSpanNameIfNotAlreadyDone(currentSpan);
          }
          if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
            Tracer.getInstance().completeRequestSpan();
          else
            Tracer.getInstance().completeSubSpan();
        }
        Tracer.getInstance().unregisterFromThread();
        if (cause instanceof Errors.NativeIoException) {
          // NativeIoExceptions are often setup to not have stack traces which is bad for debugging.
          // Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
          // connection and a second attempt should work.
          cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
        }
        callback.unrecoverableErrorOccurred(cause, true);
      } else {
        if (cause instanceof DownstreamIdleChannelTimeoutException) {
          logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
        } else {
          logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
        }
      }
    } finally {
      // Mark the channel as broken so it will be closed and removed from the pool when it is returned.
      markChannelAsBroken(ch);
      // Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
      releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
      // No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
      // here will catch the cases where this channel does not have an active call but still needs to be
      // closed (e.g. an idle channel timeout that happens in-between calls).
      ch.close();
      // Unhook the tracing and MDC stuff from this thread now that we're done.
      unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
    }
  };
  ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {
    @Override
    public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) {
      doErrorHandlingConsumer.accept(cause);
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
      if (logger.isDebugEnabled()) {
        runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
      }
      // We only care if the channel was closed while the call was active.
      if (callActiveHolder.heldObject)
        doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
      super.channelInactive(ctx);
    }
  };
  // Set up the HTTP client pipeline.
  ChannelPipeline p = ch.pipeline();
  List<String> registeredHandlerNames = p.names();
  // Remove any leftover idle channel timeout handler from a previous call that
  // couldn't be removed at that time because it wasn't in the channel's eventLoop.
  if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
    ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
    if (idleHandler != null)
      p.remove(idleHandler);
  }
  if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
    // Add the channel debug logger if desired.
    p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
  }
  // Add/replace a downstream call timeout detector.
  addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
  if (isSecureHttpsCall) {
    // Check and see if there's already an existing SslHandler in the pipeline. If it's pointed at the same
    // host/port we need for this call, then we can leave it alone and don't need to create a new one.
    boolean requiresNewSslHandler = true;
    if (registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
      SslHandler existingSslHandler = (SslHandler) p.get(SSL_HANDLER_NAME);
      SSLEngine existingSslEngine = existingSslHandler.engine();
      if (Objects.equals(downstreamHost, existingSslEngine.getPeerHost()) && Objects.equals(downstreamPort, existingSslEngine.getPeerPort())) {
        // The existing SslHandler's SslEngine is pointed at the correct host/port. We don't need to
        // add a new one.
        requiresNewSslHandler = false;
      } else {
        // The existing SslHandler's SslEngine is *not* pointed at the correct host/port. We need a new one,
        // so remove the old one.
        p.remove(SSL_HANDLER_NAME);
      }
    }
    if (requiresNewSslHandler) {
      // SSL call and we need to add a SslHandler. Create the general-purpose reusable SslContexts if needed.
      if (clientSslCtx == null) {
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init((KeyStore) null);
        clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
      }
      if (insecureSslCtx == null) {
        insecureSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
      }
      // Figure out which SslContext to use for this call.
      SslContext sslCtxToUse = (relaxedHttpsValidation) ? insecureSslCtx : clientSslCtx;
      // Create the SslHandler and configure the SslEngine
      // as per the javadocs for SslContext.newHandler(ByteBufAllocator, String, int).
      SslHandler sslHandler = sslCtxToUse.newHandler(ch.alloc(), downstreamHost, downstreamPort);
      SSLEngine sslEngine = sslHandler.engine();
      SSLParameters sslParameters = sslEngine.getSSLParameters();
      sslParameters.setEndpointIdentificationAlgorithm(HTTPS);
      sslEngine.setSSLParameters(sslParameters);
      // Add the SslHandler to the pipeline in the correct location.
      p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, sslHandler);
    }
  } else {
    // Not an SSL call. Remove the SSL handler if it's there.
    if (registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
      p.remove(SSL_HANDLER_NAME);
    }
  }
  // The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
  // been setup, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
  // and currently unknown edgecases can cause us to hit this point with the HttpClientCodec in an unclean
  // state, and if we barrel forward without cleaning this up the call will fail.
  boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
  boolean existingHttpClientCodecIsInBadState = false;
  if (pipelineContainsHttpClientCodec) {
    HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
    int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
    if (currentHttpClientCodecInboundState != 0) {
      runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
      existingHttpClientCodecIsInBadState = true;
    } else {
      int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
      if (currentHttpClientCodecOutboundState != 0) {
        runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
        existingHttpClientCodecIsInBadState = true;
      }
    }
  }
  // Add the HttpClientCodec if the pipeline doesn't already contain one,
  // or replace it if the existing one was in a bad state.
  if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
    addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
  }
  // Update the chunk sender handler and error handler to the newly created versions that know about the correct
  // callback, dtrace info, etc to use for this request.
  addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
  addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
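All of the pipeline mutations above funnel through an addOrReplacePipelineHandler helper. A minimal sketch of what such a replace-by-name-or-append helper could look like, assuming that is its only job (an illustration, not riposte's actual implementation):

// Hedged sketch, not riposte's actual code: replace a named handler if the
// pipeline already contains it, otherwise append the handler under that name.
static void addOrReplacePipelineHandler(ChannelHandler handler, String handlerName,
                                        ChannelPipeline p, List<String> registeredHandlerNames) {
  if (registeredHandlerNames.contains(handlerName)) {
    p.replace(handlerName, handlerName, handler);
  } else {
    p.addLast(handlerName, handler);
  }
}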
use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project riposte by Nike-Inc.
the class ComponentTestUtils method createNettyHttpClientBootstrap.
public static Bootstrap createNettyHttpClientBootstrap() {
  Bootstrap bootstrap = new Bootstrap();
  bootstrap.group(new NioEventLoopGroup())
           .channel(NioSocketChannel.class)
           .handler(new ChannelInitializer<SocketChannel>() {
    @Override
    protected void initChannel(SocketChannel ch) throws Exception {
      ChannelPipeline p = ch.pipeline();
      p.addLast(new HttpClientCodec());
      p.addLast(new HttpObjectAggregator(Integer.MAX_VALUE));
      p.addLast("clientResponseHandler", new SimpleChannelInboundHandler<HttpObject>() {
        @Override
        protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception {
          throw new RuntimeException("Client response handler was not setup before the call");
        }
      });
    }
  });
  return bootstrap;
}
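The placeholder handler throws until a test swaps in a real one, so a caller is expected to replace it by name before issuing a request. A hedged usage sketch (the host/port values and the CompletableFuture wiring are assumptions for illustration):

// Hedged sketch: replace the throwing placeholder before making a call.
CompletableFuture<FullHttpResponse> responseFuture = new CompletableFuture<>();
Channel ch = createNettyHttpClientBootstrap().connect("localhost", 8080).sync().channel();
ch.pipeline().replace("clientResponseHandler", "clientResponseHandler",
    new SimpleChannelInboundHandler<HttpObject>() {
      @Override
      protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
        if (msg instanceof FullHttpResponse) {
          // retain() because SimpleChannelInboundHandler releases msg after this method returns.
          responseFuture.complete(((FullHttpResponse) msg).retain());
        }
      }
    });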
use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project riposte by Nike-Inc.
the class VerifyTimeoutsAndProxyConnectionPoolingWorksComponentTest method verify_incomplete_call_is_timed_out.
@Test
public void verify_incomplete_call_is_timed_out() throws InterruptedException, TimeoutException, ExecutionException, IOException {
  Bootstrap bootstrap = new Bootstrap();
  EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
  try {
    CompletableFuture<Pair<String, String>> responseFromServer = new CompletableFuture<>();
    // Create a raw netty HTTP client so we can fiddle with headers and intentionally create a bad request
    // that should trigger the bad call timeout.
    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
      @Override
      protected void initChannel(SocketChannel ch) throws Exception {
        ChannelPipeline p = ch.pipeline();
        p.addLast(new HttpClientCodec());
        p.addLast(new HttpObjectAggregator(Integer.MAX_VALUE));
        p.addLast(new SimpleChannelInboundHandler<HttpObject>() {
          @Override
          protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception {
            if (msg instanceof FullHttpResponse) {
              // Store the server response for asserting on later.
              FullHttpResponse responseMsg = (FullHttpResponse) msg;
              responseFromServer.complete(Pair.of(responseMsg.content().toString(CharsetUtil.UTF_8), responseMsg.headers().get(HttpHeaders.Names.CONNECTION)));
            } else {
              // Should never happen.
              throw new RuntimeException("Received unexpected message type: " + msg.getClass());
            }
          }
        });
      }
    });
    // Connect to the server.
    Channel ch = bootstrap.connect("localhost", downstreamServerConfig.endpointsPort()).sync().channel();
    // Create a bad HTTP request. This one will be bad because it has a non-zero content-length header,
    // but we're sending no payload. The server should (correctly) sit and wait for payload bytes to
    // arrive until it hits the timeout, at which point it should return the correct error response.
    HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, LongDelayTestEndpoint.MATCHING_PATH);
    request.headers().set(HttpHeaders.Names.HOST, "localhost");
    request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    request.headers().set(HttpHeaders.Names.CONTENT_LENGTH, "100");
    long beforeCallTimeNanos = System.nanoTime();
    // Send the bad request.
    ch.writeAndFlush(request);
    // Wait for the response to be received and the connection to be closed.
    try {
      ch.closeFuture().get(incompleteCallTimeoutMillis * 10, TimeUnit.MILLISECONDS);
      responseFromServer.get(incompleteCallTimeoutMillis * 10, TimeUnit.MILLISECONDS);
    } catch (TimeoutException ex) {
      fail("The call took much longer than expected without receiving a response. " + "Cancelling this test - it's not working properly", ex);
    }
    // If we reach here then the call should be complete.
    long totalCallTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - beforeCallTimeNanos);
    // Verify that we got back the correct error response.
    // It should be a MALFORMED_REQUEST with extra metadata explaining that the call was bad.
    Pair<String, String> responseInfo = responseFromServer.get();
    DefaultErrorContractDTO errorContract = objectMapper.readValue(responseInfo.getLeft(), DefaultErrorContractDTO.class);
    assertThat(errorContract).isNotNull();
    assertThat(errorContract.errors.size()).isEqualTo(1);
    DefaultErrorDTO error = errorContract.errors.get(0);
    ApiError expectedApiError = SampleCoreApiError.MALFORMED_REQUEST;
    Map<String, Object> expectedMetadata = MapBuilder.builder("cause", (Object) "Unfinished/invalid HTTP request").build();
    assertThat(error.code).isEqualTo(expectedApiError.getErrorCode());
    assertThat(error.message).isEqualTo(expectedApiError.getMessage());
    assertThat(error.metadata).isEqualTo(expectedMetadata);
    // The server should have closed the connection even though we asked for keep-alive.
    assertThat(responseInfo.getRight()).isEqualTo(HttpHeaders.Values.CLOSE);
    // Total call time should be pretty close to incompleteCallTimeoutMillis give or take a few
    // milliseconds, but due to the inability to account for slow machines running the unit tests,
    // a server that isn't warmed up, etc, we can't put a ceiling on the wiggle room we'd need, so
    // we'll just verify it took at least the minimum necessary amount of time.
    assertThat(totalCallTimeMillis).isGreaterThanOrEqualTo(incompleteCallTimeoutMillis);
  } finally {
    eventLoopGroup.shutdownGracefully();
  }
}