Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project netty by netty: the class HttpToHttp2ConnectionHandlerTest, method testChunkedRequestWithBodyAndTrailingHeaders.
@Test
public void testChunkedRequestWithBodyAndTrailingHeaders() throws Exception {
    final String text = "foooooo";
    final String text2 = "goooo";
    final List<String> receivedBuffers = Collections.synchronizedList(new ArrayList<String>());
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock in) throws Throwable {
            receivedBuffers.add(((ByteBuf) in.getArguments()[2]).toString(UTF_8));
            return null;
        }
    }).when(serverListener).onDataRead(any(ChannelHandlerContext.class), eq(3), any(ByteBuf.class), eq(0), eq(false));
    bootstrapEnv(4, 1, 1);
    final HttpRequest request = new DefaultHttpRequest(HTTP_1_1, POST,
            "http://your_user-name123@www.example.org:5555/example");
    final HttpHeaders httpHeaders = request.headers();
    httpHeaders.set(HttpHeaderNames.HOST, "www.example.org:5555");
    httpHeaders.add(HttpHeaderNames.TRANSFER_ENCODING, "chunked");
    httpHeaders.add(of("foo"), of("goo"));
    httpHeaders.add(of("foo"), of("goo2"));
    httpHeaders.add(of("foo2"), of("goo2"));
    final Http2Headers http2Headers = new DefaultHttp2Headers()
            .method(new AsciiString("POST"))
            .path(new AsciiString("/example"))
            .authority(new AsciiString("www.example.org:5555"))
            .scheme(new AsciiString("http"))
            .add(new AsciiString("foo"), new AsciiString("goo"))
            .add(new AsciiString("foo"), new AsciiString("goo2"))
            .add(new AsciiString("foo2"), new AsciiString("goo2"));
    final DefaultHttpContent httpContent = new DefaultHttpContent(Unpooled.copiedBuffer(text, UTF_8));
    final LastHttpContent lastHttpContent = new DefaultLastHttpContent(Unpooled.copiedBuffer(text2, UTF_8));
    lastHttpContent.trailingHeaders().add(of("trailing"), of("bar"));
    final Http2Headers http2TrailingHeaders = new DefaultHttp2Headers()
            .add(new AsciiString("trailing"), new AsciiString("bar"));
    ChannelPromise writePromise = newPromise();
    ChannelFuture writeFuture = clientChannel.write(request, writePromise);
    ChannelPromise contentPromise = newPromise();
    ChannelFuture contentFuture = clientChannel.write(httpContent, contentPromise);
    ChannelPromise lastContentPromise = newPromise();
    ChannelFuture lastContentFuture = clientChannel.write(lastHttpContent, lastContentPromise);
    clientChannel.flush();
    assertTrue(writePromise.awaitUninterruptibly(WAIT_TIME_SECONDS, SECONDS));
    assertTrue(writePromise.isSuccess());
    assertTrue(writeFuture.awaitUninterruptibly(WAIT_TIME_SECONDS, SECONDS));
    assertTrue(writeFuture.isSuccess());
    assertTrue(contentPromise.awaitUninterruptibly(WAIT_TIME_SECONDS, SECONDS));
    assertTrue(contentPromise.isSuccess());
    assertTrue(contentFuture.awaitUninterruptibly(WAIT_TIME_SECONDS, SECONDS));
    assertTrue(contentFuture.isSuccess());
    assertTrue(lastContentPromise.awaitUninterruptibly(WAIT_TIME_SECONDS, SECONDS));
    assertTrue(lastContentPromise.isSuccess());
    assertTrue(lastContentFuture.awaitUninterruptibly(WAIT_TIME_SECONDS, SECONDS));
    assertTrue(lastContentFuture.isSuccess());
    awaitRequests();
    verify(serverListener).onHeadersRead(any(ChannelHandlerContext.class), eq(3), eq(http2Headers), eq(0),
            anyShort(), anyBoolean(), eq(0), eq(false));
    verify(serverListener).onDataRead(any(ChannelHandlerContext.class), eq(3), any(ByteBuf.class), eq(0), eq(false));
    verify(serverListener).onHeadersRead(any(ChannelHandlerContext.class), eq(3), eq(http2TrailingHeaders), eq(0),
            anyShort(), anyBoolean(), eq(0), eq(true));
    assertEquals(1, receivedBuffers.size());
    assertEquals(text + text2, receivedBuffers.get(0));
}
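For context, the HttpToHttp2ConnectionHandler under test translates HTTP/1.1 writes like the ones above into HTTP/2 frames. Below is a minimal, hedged sketch of how such a handler might be installed on a client pipeline; the Http2FrameAdapter used as the frame listener is only a placeholder, not what the test itself wires up.
// Sketch only: install an HTTP/1.1-to-HTTP/2 translation handler on a client channel.
// The frame listener is a no-op placeholder; the test above verifies frames with a server-side listener mock instead.
Http2Connection connection = new DefaultHttp2Connection(false); // false = client-side connection
HttpToHttp2ConnectionHandler connectionHandler = new HttpToHttp2ConnectionHandlerBuilder()
        .connection(connection)
        .frameListener(new Http2FrameAdapter())
        .build();
clientChannel.pipeline().addLast(connectionHandler);
// After this, writing an HttpRequest, HttpContent, and LastHttpContent (with trailing headers)
// is expected to appear on the peer as HEADERS, DATA, and a trailing HEADERS frame with END_STREAM set.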
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project zuul by Netflix: the class ClientRequestReceiver, method channelReadInternal.
private void channelReadInternal(final ChannelHandlerContext ctx, Object msg) throws Exception {
    // Flag that the last content for this request has been received, so it is known
    // downstream when it is safe to start writing a response to the client channel.
    if (msg instanceof LastHttpContent) {
        ctx.channel().attr(ATTR_LAST_CONTENT_RECEIVED).set(Boolean.TRUE);
    }
    if (msg instanceof HttpRequest) {
        clientRequest = (HttpRequest) msg;
        zuulRequest = buildZuulHttpRequest(clientRequest, ctx);
        // Handle invalid HTTP requests.
        if (clientRequest.decoderResult().isFailure()) {
            LOG.warn("Invalid http request. clientRequest = {} , uri = {}, info = {}",
                    clientRequest, clientRequest.uri(), ChannelUtils.channelInfoForLogging(ctx.channel()),
                    clientRequest.decoderResult().cause());
            StatusCategoryUtils.setStatusCategory(zuulRequest.getContext(), ZuulStatusCategory.FAILURE_CLIENT_BAD_REQUEST);
            RejectionUtils.rejectByClosingConnection(ctx, ZuulStatusCategory.FAILURE_CLIENT_BAD_REQUEST,
                    "decodefailure", clientRequest, /* injectedLatencyMillis= */ null);
            return;
        } else if (zuulRequest.hasBody() && zuulRequest.getBodyLength() > zuulRequest.getMaxBodySize()) {
            String errorMsg = "Request too large. " + "clientRequest = " + clientRequest.toString()
                    + ", uri = " + String.valueOf(clientRequest.uri())
                    + ", info = " + ChannelUtils.channelInfoForLogging(ctx.channel());
            final ZuulException ze = new ZuulException(errorMsg);
            ze.setStatusCode(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE.code());
            StatusCategoryUtils.setStatusCategory(zuulRequest.getContext(), ZuulStatusCategory.FAILURE_CLIENT_BAD_REQUEST);
            zuulRequest.getContext().setError(ze);
            zuulRequest.getContext().setShouldSendErrorResponse(true);
        } else if (zuulRequest.getHeaders().getAll(HttpHeaderNames.HOST.toString()).size() > 1) {
            LOG.debug("Multiple Host headers. clientRequest = {} , uri = {}, info = {}",
                    clientRequest, clientRequest.uri(), ChannelUtils.channelInfoForLogging(ctx.channel()));
            final ZuulException ze = new ZuulException("Multiple Host headers");
            ze.setStatusCode(HttpResponseStatus.BAD_REQUEST.code());
            StatusCategoryUtils.setStatusCategory(zuulRequest.getContext(), ZuulStatusCategory.FAILURE_CLIENT_BAD_REQUEST);
            zuulRequest.getContext().setError(ze);
            zuulRequest.getContext().setShouldSendErrorResponse(true);
        }
        handleExpect100Continue(ctx, clientRequest);
        // Send the request down the filter pipeline.
        ctx.fireChannelRead(zuulRequest);
    } else if (msg instanceof HttpContent) {
        if ((zuulRequest != null) && (!zuulRequest.getContext().isCancelled())) {
            ctx.fireChannelRead(msg);
        } else {
            // We already sent a response for this request; these are laggard request body chunks that are still arriving.
            ReferenceCountUtil.release(msg);
        }
    } else if (msg instanceof HAProxyMessage) {
        // Do nothing; this should already have been handled by ElbProxyProtocolHandler.
        LOG.debug("Received HAProxyMessage for Proxy Protocol IP: {}", ((HAProxyMessage) msg).sourceAddress());
        ReferenceCountUtil.release(msg);
    } else {
        LOG.debug("Received unrecognized message type. " + msg.getClass().getName());
        ReferenceCountUtil.release(msg);
    }
}
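The handleExpect100Continue call above is not shown in this snippet. A minimal sketch of such a check, assuming it only needs to emit an interim 100 Continue and strip the Expect header (the real Zuul implementation may do more), could look like this:
// Hypothetical helper, for illustration only; not the actual Zuul implementation.
private static void handleExpect100Continue(ChannelHandlerContext ctx, HttpRequest request) {
    if (HttpUtil.is100ContinueExpected(request)) {
        // Tell the client it may start sending the request body.
        ctx.writeAndFlush(new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.CONTINUE));
        // Drop the Expect header so it is not forwarded to the origin.
        request.headers().remove(HttpHeaderNames.EXPECT);
    }
}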
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project zuul by Netflix: the class Http2ContentLengthEnforcingHandler, method channelRead.
/**
 * This checks that the content length does what it says, preventing a client from causing Zuul to misinterpret the
 * request. Because this class is meant to work in an HTTP/2 setting, the content-length and transfer-encoding
 * checks are primarily semantic. In particular, this checks:
 * <ul>
 * <li>No duplicate Content-Length headers</li>
 * <li>Content-Length (if present) must always be greater than or equal to how much content has been seen</li>
 * <li>Content-Length (if present) must always be equal to how much content has been seen by the end</li>
 * <li>Content-Length cannot be present along with chunked transfer encoding.</li>
 * </ul>
 */
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (msg instanceof HttpRequest) {
        HttpRequest req = (HttpRequest) msg;
        List<String> lengthHeaders = req.headers().getAll(HttpHeaderNames.CONTENT_LENGTH);
        if (lengthHeaders.size() > 1) {
            ctx.writeAndFlush(new DefaultHttp2ResetFrame(Http2Error.PROTOCOL_ERROR));
            return;
        } else if (lengthHeaders.size() == 1) {
            expectedContentLength = Long.parseLong(lengthHeaders.get(0));
            if (expectedContentLength < 0) {
                // TODO(carl-mastrangelo): this is not right, but meh. Fix this to return a proper 400.
                ctx.writeAndFlush(new DefaultHttp2ResetFrame(Http2Error.PROTOCOL_ERROR));
                return;
            }
        }
        if (hasContentLength() && HttpUtil.isTransferEncodingChunked(req)) {
            // TODO(carl-mastrangelo): this is not right, but meh. Fix this to return a proper 400.
            ctx.writeAndFlush(new DefaultHttp2ResetFrame(Http2Error.PROTOCOL_ERROR));
            return;
        }
    }
    if (msg instanceof HttpContent) {
        ByteBuf content = ((HttpContent) msg).content();
        incrementSeenContent(content.readableBytes());
        if (hasContentLength() && seenContentLength > expectedContentLength) {
            // TODO(carl-mastrangelo): this is not right, but meh. Fix this to return a proper 400.
            ctx.writeAndFlush(new DefaultHttp2ResetFrame(Http2Error.PROTOCOL_ERROR));
            return;
        }
    }
    if (msg instanceof LastHttpContent) {
        if (hasContentLength() && seenContentLength != expectedContentLength) {
            // TODO(carl-mastrangelo): this is not right, but meh. Fix this to return a proper 400.
            ctx.writeAndFlush(new DefaultHttp2ResetFrame(Http2Error.PROTOCOL_ERROR));
            return;
        }
    }
    super.channelRead(ctx, msg);
}
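The expectedContentLength and seenContentLength fields and the hasContentLength() and incrementSeenContent() helpers referenced above are not shown. A minimal sketch of that bookkeeping, assuming an unset length is marked with a -1 sentinel, might look like this:
// Hypothetical sketch of the handler's bookkeeping; the actual field names and sentinel in Zuul may differ.
private static final long UNSET_CONTENT_LENGTH = -1;
private long expectedContentLength = UNSET_CONTENT_LENGTH;
private long seenContentLength;

private boolean hasContentLength() {
    return expectedContentLength != UNSET_CONTENT_LENGTH;
}

private void incrementSeenContent(int readableBytes) {
    seenContentLength += readableBytes;
}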
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project vert.x by eclipse: the class WebSocketHandshakeInboundHandler, method channelRead.
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
    if (msg instanceof HttpResponse) {
        HttpResponse resp = (HttpResponse) msg;
        response = new DefaultFullHttpResponse(resp.protocolVersion(), resp.status());
        response.headers().add(resp.headers());
    }
    if (msg instanceof HttpContent) {
        HttpContent content = (HttpContent) msg;
        try {
            if (response != null) {
                response.content().writeBytes(content.content());
                if (msg instanceof LastHttpContent) {
                    response.trailingHeaders().add(((LastHttpContent) msg).trailingHeaders());
                    ChannelPipeline pipeline = chctx.pipeline();
                    pipeline.remove(WebSocketHandshakeInboundHandler.this);
                    ChannelHandler handler = pipeline.get(HttpContentDecompressor.class);
                    if (handler != null) {
                        // Remove the decompressor as it is no longer needed once the connection has been upgraded to WebSocket.
                        ctx.pipeline().remove(handler);
                    }
                    Future<HeadersAdaptor> fut = handshakeComplete(response);
                    wsHandler.handle(fut);
                }
            }
        } finally {
            content.release();
        }
    }
}
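For comparison, when this kind of manual accumulation is not needed, netty ships an HttpObjectAggregator that merges an HttpResponse and its HttpContent chunks (terminated by LastHttpContent) into a single FullHttpResponse. A minimal sketch of using it on a client pipeline (the pipeline setup here is illustrative):
ChannelPipeline p = channel.pipeline();
p.addLast(new HttpClientCodec());
// Aggregate the HttpResponse and all following HttpContent chunks (up to 1 MiB here)
// into a single FullHttpResponse, so downstream handlers receive one complete message.
p.addLast(new HttpObjectAggregator(1024 * 1024));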
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project crate by crate: the class HttpBlobHandler, method put.
private void put(HttpRequest request, HttpContent content, String index, String digest) throws IOException {
    if (digestBlob == null) {
        digestBlob = blobService.newBlob(index, digest);
    }
    boolean continueExpected = HttpUtil.is100ContinueExpected(currentMessage);
    if (content == null) {
        if (continueExpected) {
            ctx.writeAndFlush(new DefaultFullHttpResponse(HTTP_1_1, CONTINUE));
        }
        return;
    }
    boolean isLast = content instanceof LastHttpContent;
    ByteBuf byteBuf = content.content();
    try {
        writeToFile(request, byteBuf, isLast, continueExpected);
    } finally {
        byteBuf.release();
    }
}
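Here the LastHttpContent check only sets the isLast flag; writeToFile is expected to use it to finalize the blob once the final chunk arrives. A simplified, illustrative sketch of that pattern (not CrateDB's actual writeToFile) could look like this:
// Illustrative only; CrateDB's real implementation also handles digests, 100-continue responses and errors.
private void writeChunk(ByteBuf byteBuf, FileChannel out, boolean isLast) throws IOException {
    // Copy the readable bytes of this chunk to the target file (java.nio.channels.FileChannel).
    out.write(byteBuf.nioBuffer());
    if (isLast) {
        // The chunk was a LastHttpContent, so the upload is complete: flush and close the file.
        out.force(true);
        out.close();
    }
}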