Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project reactor-netty by reactor.
The class HttpClientOperations, method onInboundNext.
@Override
protected void onInboundNext(ChannelHandlerContext ctx, Object msg) {
if (msg instanceof HttpResponse) {
HttpResponse response = (HttpResponse) msg;
if (response.decoderResult().isFailure()) {
onInboundError(response.decoderResult().cause());
return;
}
if (started) {
if (log.isDebugEnabled()) {
log.debug("{} An HttpClientOperations cannot proceed more than one " + "Response", channel(), response.headers().toString());
}
return;
}
started = true;
setNettyResponse(response);
if (!isKeepAlive()) {
markPersistent(false);
}
if (isInboundCancelled()) {
ReferenceCountUtil.release(msg);
return;
}
if (log.isDebugEnabled()) {
log.debug("{} Received response (auto-read:{}) : {}", channel(), channel().config().isAutoRead(), responseHeaders().entries().toString());
}
if (checkResponseCode(response)) {
prefetchMore(ctx);
parentContext().fireContextActive(this);
}
if (msg instanceof FullHttpResponse) {
super.onInboundNext(ctx, msg);
onHandlerTerminate();
}
return;
}
if (msg instanceof LastHttpContent) {
if (!started) {
if (log.isDebugEnabled()) {
log.debug("{} HttpClientOperations received an incorrect end " + "delimiter" + "(previously used connection?)", channel());
}
return;
}
if (log.isDebugEnabled()) {
log.debug("{} Received last HTTP packet", channel());
}
if (msg != LastHttpContent.EMPTY_LAST_CONTENT) {
super.onInboundNext(ctx, msg);
}
// force auto read to enable more accurate close selection now inbound is done
channel().config().setAutoRead(true);
onHandlerTerminate();
return;
}
if (!started) {
if (log.isDebugEnabled()) {
if (msg instanceof ByteBufHolder) {
msg = ((ByteBufHolder) msg).content();
}
log.debug("{} HttpClientOperations received an incorrect chunk " + "" + "(previously used connection?)", channel(), msg);
}
return;
}
super.onInboundNext(ctx, msg);
prefetchMore(ctx);
}
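For comparison, here is a minimal standalone sketch of the same dispatch idea written against plain Netty rather than reactor-netty's internals: react to the response head, stream body chunks, and treat LastHttpContent as the end-of-response signal. The class name and log output are illustrative, and it assumes no HttpObjectAggregator sits upstream (so a response arrives as an HttpResponse head, HttpContent chunks, then a terminating LastHttpContent).

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.LastHttpContent;

public final class ResponseEndAwareHandler extends SimpleChannelInboundHandler<HttpObject> {

    private boolean started;

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
        if (msg instanceof HttpResponse) {
            HttpResponse response = (HttpResponse) msg;
            if (response.decoderResult().isFailure()) {
                // Surface decode failures instead of processing a half-parsed response.
                ctx.fireExceptionCaught(response.decoderResult().cause());
                return;
            }
            started = true;
            System.out.println("Response head: " + response.status()
                    + ", keep-alive: " + HttpUtil.isKeepAlive(response));
            return;
        }
        if (!started) {
            // Content arriving before a response head usually means a stale/reused connection.
            return;
        }
        if (msg instanceof LastHttpContent) {
            // LastHttpContent carries the final body bytes plus optional trailing headers
            // and signals that the response is complete.
            LastHttpContent last = (LastHttpContent) msg;
            System.out.println("Response complete, trailers: " + last.trailingHeaders());
            started = false;
            return;
        }
        if (msg instanceof HttpContent) {
            System.out.println("Body chunk: " + ((HttpContent) msg).content().readableBytes() + " bytes");
        }
    }
}

SimpleChannelInboundHandler releases each message after channelRead0 returns, so no explicit ReferenceCountUtil.release calls are needed in this sketch.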
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project reactor-netty by reactor.
The class HttpServerHandler, method channelRead.
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
// read message and track if it was keepAlive
if (msg instanceof HttpRequest) {
final HttpRequest request = (HttpRequest) msg;
DecoderResult decoderResult = request.decoderResult();
if (decoderResult.isFailure()) {
Throwable cause = decoderResult.cause();
HttpServerOperations.log.debug("Decoding failed: " + msg + " : ", cause);
HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0, cause instanceof TooLongFrameException ? HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE : HttpResponseStatus.BAD_REQUEST);
response.headers().setInt(HttpHeaderNames.CONTENT_LENGTH, 0).set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
return;
}
if (persistentConnection) {
pendingResponses += 1;
if (HttpServerOperations.log.isDebugEnabled()) {
HttpServerOperations.log.debug("Increasing pending responses, now " + "{}", pendingResponses);
}
persistentConnection = isKeepAlive(request);
} else {
if (HttpServerOperations.log.isDebugEnabled()) {
HttpServerOperations.log.debug("dropping pipelined HTTP request, " + "previous response requested connection close");
}
ReferenceCountUtil.release(msg);
return;
}
if (pendingResponses > 1) {
if (HttpServerOperations.log.isDebugEnabled()) {
HttpServerOperations.log.debug("buffering pipelined HTTP request, " + "pending response count: {}, queue: {}", pendingResponses, pipelined != null ? pipelined.size() : 0);
}
overflow = true;
doPipeline(ctx, msg);
return;
} else {
overflow = false;
parentContext.createOperations(ctx.channel(), msg);
if (!(msg instanceof FullHttpRequest)) {
return;
}
}
} else if (persistentConnection && pendingResponses == 0) {
if (HttpServerOperations.log.isDebugEnabled()) {
HttpServerOperations.log.debug("Dropped HTTP content, " + "Since response has been sent already:{}", msg);
}
if (msg instanceof LastHttpContent) {
ctx.fireChannelRead(msg);
} else {
ReferenceCountUtil.release(msg);
}
ctx.read();
return;
} else if (overflow) {
if (HttpServerOperations.log.isDebugEnabled()) {
HttpServerOperations.log.debug("buffering pipelined HTTP content, " + "pending response count: {}, pending pipeline:{}", pendingResponses, pipelined != null ? pipelined.size() : 0);
}
doPipeline(ctx, msg);
return;
}
ctx.fireChannelRead(msg);
}
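The core bookkeeping above can be illustrated with a much smaller sketch, assuming plain Netty and none of reactor-netty's pipelining queue: reject undecodable request heads with a closing error response, and use LastHttpContent to know when a request has been fully read off the wire. The class name and counter are illustrative.

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.util.ReferenceCountUtil;

public final class RequestBoundaryTracker extends ChannelInboundHandlerAdapter {

    private int requestsFullyRead;

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        if (msg instanceof HttpRequest && ((HttpRequest) msg).decoderResult().isFailure()) {
            // Mirror the handler above: answer a malformed request head with a small
            // error response and close the connection.
            ReferenceCountUtil.release(msg);
            FullHttpResponse reject = new DefaultFullHttpResponse(
                    HttpVersion.HTTP_1_1, HttpResponseStatus.BAD_REQUEST, Unpooled.EMPTY_BUFFER);
            reject.headers()
                  .setInt(HttpHeaderNames.CONTENT_LENGTH, 0)
                  .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
            ctx.writeAndFlush(reject).addListener(ChannelFutureListener.CLOSE);
            return;
        }
        if (msg instanceof LastHttpContent) {
            // One request is now completely read; with HTTP pipelining the next
            // HttpRequest may already be queued right behind this message.
            requestsFullyRead++;
            System.out.println("Requests fully read on this connection: " + requestsFullyRead);
        }
        ctx.fireChannelRead(msg);
    }
}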
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project openzaly by akaxincom.
The class HttpServerHandler, method channelRead.
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
try {
/**
* An HTTP request consists of: <br>
* 1. the request line: Method Request-URI HTTP-Version CRLF<br>
* 2. the message headers <br>
* 3. the request body <br>
*/
if (msg instanceof HttpRequest) {
request = (HttpRequest) msg;
if (!checkLegalRequest()) {
logger.error("{} http request method error. please use post!", AkxProject.PLN);
ctx.close();
return;
}
String clientIp = request.headers().get(HttpConst.HTTP_H_FORWARDED);
if (clientIp == null) {
InetSocketAddress address = (InetSocketAddress) ctx.channel().remoteAddress();
clientIp = address.getAddress().getHostAddress();
}
if (!checkLegalClientIp(clientIp)) {
logger.error("{} http request illegal IP={}.", AkxProject.PLN, clientIp);
ctx.close();
return;
}
logger.debug("{} request uri:{} clientIp={}", AkxProject.PLN, request.uri(), clientIp);
}
/**
* HttpContent: the base class for the HTTP entity body and content headers <br>
* For POST requests the transmitted message body carries content
*/
if (msg instanceof LastHttpContent) {
HttpContent content = (HttpContent) msg;
ByteBuf httpByteBuf = content.content();
if (httpByteBuf == null) {
return;
}
if (!checkLegalRequest()) {
ctx.close();
return;
}
String clientIp = request.headers().get(HttpConst.HTTP_H_FORWARDED);
String sitePluginId = request.headers().get(PluginConst.SITE_PLUGIN_ID);
byte[] contentBytes = new byte[httpByteBuf.readableBytes()];
httpByteBuf.readBytes(contentBytes);
httpByteBuf.release();
// Look up the plugin's auth key
String authKey = PluginSession.getInstance().getPluginAuthKey(sitePluginId);
if (StringUtils.isNotEmpty(authKey)) {
// byte[] tsk = AESCrypto.generateTSKey(authKey);
byte[] tsk = authKey.getBytes(CharsetCoding.ISO_8859_1);
byte[] decContent = AESCrypto.decrypt(tsk, contentBytes);
contentBytes = decContent;
}
PluginProto.ProxyPluginPackage pluginPackage = PluginProto.ProxyPluginPackage.parseFrom(contentBytes);
Map<Integer, String> proxyHeader = pluginPackage.getPluginHeaderMap();
String requestTime = proxyHeader.get(PluginProto.PluginHeaderKey.PLUGIN_TIMESTAMP_VALUE);
long currentTime = System.currentTimeMillis();
boolean timeOut = true;
if (StringUtils.isNotEmpty(requestTime)) {
long timeMills = Long.valueOf(requestTime);
if (currentTime - timeMills < 10 * 1000L) {
timeOut = false;
}
}
logger.debug("{} client={} http request timeOut={} currTime={} reqTime={}", AkxProject.PLN, clientIp, timeOut, currentTime, requestTime);
if (!timeOut) {
Command command = new Command();
command.setField(PluginConst.PLUGIN_AUTH_KEY, authKey);
if (proxyHeader != null) {
command.setSiteUserId(proxyHeader.get(PluginProto.PluginHeaderKey.CLIENT_SITE_USER_ID_VALUE));
}
command.setChannelContext(ctx);
command.setUri(request.uri());
command.setParams(Base64.getDecoder().decode(pluginPackage.getData()));
command.setClientIp(clientIp);
command.setStartTime(System.currentTimeMillis());
logger.debug("{} client={} http server handler command={}", AkxProject.PLN, clientIp, command.toString());
CommandResponse response = this.executor.execute(HttpUriAction.HTTP_ACTION.getUri(), command);
LogUtils.requestResultLog(logger, command, response);
} else {
// The request is more than 10s old; treat it as stale and close the connection
ctx.close();
logger.error("{} client={} http request error.timeOut={} currTime={} reqTime={}", AkxProject.PLN, clientIp, timeOut, currentTime, requestTime);
}
}
} catch (Exception e) {
ctx.close();
logger.error(StringHelper.format("{} http request error.", AkxProject.PLN), e);
}
}
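The handler above reads the request body out of the terminal chunk, which presumes the whole payload is available in that message. As a rough sketch under that caveat, here is a small standalone variant that accumulates chunks explicitly and only acts once LastHttpContent arrives; the class name is illustrative and the decrypt/parse step is left as a comment.

import io.netty.buffer.CompositeByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;

public final class BodyCollectingHandler extends SimpleChannelInboundHandler<HttpObject> {

    private HttpRequest request;
    private CompositeByteBuf body;

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
        if (msg instanceof HttpRequest) {
            request = (HttpRequest) msg;
            body = ctx.alloc().compositeBuffer();
        }
        if (msg instanceof HttpContent && body != null) {
            // retain() because SimpleChannelInboundHandler releases msg after this method,
            // while the composite buffer keeps referencing the chunk's bytes.
            body.addComponent(true, ((HttpContent) msg).content().retain());
            if (msg instanceof LastHttpContent) {
                byte[] contentBytes = new byte[body.readableBytes()];
                body.readBytes(contentBytes);
                body.release();
                body = null;
                System.out.println(request.method() + " " + request.uri()
                        + ": received " + contentBytes.length + " body bytes");
                // ... decrypt and parse contentBytes here, as the handler above does ...
            }
        }
    }

    @Override
    public void handlerRemoved(ChannelHandlerContext ctx) {
        if (body != null) {
            // Avoid leaking a partially collected body if the channel goes away early.
            body.release();
            body = null;
        }
    }
}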
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project riposte by Nike-Inc.
The class NonblockingEndpointExecutionHandler, method doChannelRead.
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
Endpoint<?> endpoint = state.getEndpointForExecution();
if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
// We only do something when the last chunk of content has arrived.
if (msg instanceof LastHttpContent) {
NonblockingEndpoint nonblockingEndpoint = ((NonblockingEndpoint) endpoint);
// We're supposed to execute the endpoint. There may be pre-endpoint-execution validation logic or
// other work that needs to happen before the endpoint is executed, so set up the
// CompletableFuture for the endpoint call to only execute if the pre-endpoint-execution
// validation/work chain is successful.
RequestInfo<?> requestInfo = state.getRequestInfo();
Span endpointExecutionSpan = findEndpointExecutionSpan(state);
CompletableFuture<ResponseInfo<?>> responseFuture = state.getPreEndpointExecutionWorkChain().thenCompose(doExecuteEndpointFunction(requestInfo, nonblockingEndpoint, endpointExecutionSpan, ctx));
// Register an on-completion callback so we can be notified when the CompletableFuture finishes.
responseFuture.whenComplete((responseInfo, throwable) -> {
// Add an endpoint-finish annotation to the endpoint execution span if the tagging strategy calls for it.
if (endpointExecutionSpan != null && spanTaggingStrategy.shouldAddEndpointFinishAnnotation()) {
addEndpointFinishAnnotation(endpointExecutionSpan, spanTaggingStrategy);
}
// Kick off the response processing, depending on whether the result is an error or not.
if (throwable != null)
asyncErrorCallback(ctx, throwable);
else
asyncCallback(ctx, responseInfo);
});
// TODO: We might be able to put the timeout future in an if block in the case that the endpoint
// returned an already-completed future (i.e. if responseFuture.isDone() returns true at this
// point).
// Also schedule a timeout check with our Netty event loop to make sure we kill the
// CompletableFuture if it goes on too long.
Long endpointTimeoutOverride = nonblockingEndpoint.completableFutureTimeoutOverrideMillis();
long timeoutValueToUse = (endpointTimeoutOverride == null) ? defaultCompletableFutureTimeoutMillis : endpointTimeoutOverride;
ScheduledFuture<?> responseTimeoutScheduledFuture = ctx.channel().eventLoop().schedule(() -> {
if (!responseFuture.isDone()) {
runnableWithTracingAndMdc(() -> logger.error("A non-blocking endpoint's CompletableFuture did not finish within " + "the allotted timeout ({} milliseconds). Forcibly cancelling it.", timeoutValueToUse), ctx).run();
@SuppressWarnings("unchecked") Throwable errorToUse = nonblockingEndpoint.getCustomTimeoutExceptionCause(requestInfo, ctx);
if (errorToUse == null)
errorToUse = new NonblockingEndpointCompletableFutureTimedOut(timeoutValueToUse);
responseFuture.completeExceptionally(errorToUse);
}
}, timeoutValueToUse, TimeUnit.MILLISECONDS);
/*
The problem with the scheduled timeout check is that it holds on to the RequestInfo,
ChannelHandlerContext, and a bunch of other stuff that *should* become garbage the instant the
request finishes, but because of the timeout check it has to wait until the check executes
before the garbage is collectible. In high volume servers the default 60 second timeout is way
too long and acts like a memory leak and results in garbage collection thrashing if the
available memory can be filled within the 60 second timeout. To combat this we cancel the
timeout future when the endpoint future finishes. Netty will remove the cancelled timeout future
from its scheduled list within a short time, thus letting the garbage be collected.
*/
responseFuture.whenComplete((responseInfo, throwable) -> {
if (!responseTimeoutScheduledFuture.isDone())
responseTimeoutScheduledFuture.cancel(false);
});
}
// Don't fire a continue event here; the response is sent when the endpoint's CompletableFuture completes (see asyncCallback() and asyncErrorCallback()).
return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
}
// Not a message this handler executes an endpoint for, so continue the pipeline and let downstream handlers process it (or produce the error to be returned to the client).
return PipelineContinuationBehavior.CONTINUE;
}
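The timeout-and-cancel pattern described in the comments above can be isolated into a small helper. This is a sketch, not riposte's API: the class and method names, the TimeoutException, and the idea of passing the channel's event loop in directly are all illustrative.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import io.netty.channel.EventLoop;
import io.netty.util.concurrent.ScheduledFuture;

public final class FutureTimeouts {

    private FutureTimeouts() {
    }

    public static <T> CompletableFuture<T> failAfter(CompletableFuture<T> future,
                                                     EventLoop eventLoop,
                                                     long timeoutMillis) {
        // Schedule a check on the event loop that fails the future if it is still
        // running once the timeout elapses.
        ScheduledFuture<?> timeoutTask = eventLoop.schedule(() -> {
            if (!future.isDone()) {
                future.completeExceptionally(
                        new TimeoutException("future did not finish within " + timeoutMillis + " ms"));
            }
        }, timeoutMillis, TimeUnit.MILLISECONDS);

        // Cancel the scheduled check as soon as the future finishes (success or failure),
        // so the event loop can drop it and everything it captures becomes collectible.
        future.whenComplete((result, error) -> {
            if (!timeoutTask.isDone()) {
                timeoutTask.cancel(false);
            }
        });
        return future;
    }
}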
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.LastHttpContent in project riposte by Nike-Inc.
The class RequestFilterHandler, method doChannelRead.
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) {
if (msg instanceof HttpRequest) {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
handlerUtils.createRequestInfoFromNettyHttpRequestAndHandleStateSetupIfNecessary((HttpRequest) msg, state);
// If the Netty HttpRequest is invalid, we shouldn't process any of the filters.
handlerUtils.throwExceptionIfNotSuccessfullyDecoded((HttpRequest) msg);
// The HttpRequest is valid, so process the filters.
BiFunction<RequestAndResponseFilter, RequestInfo, RequestInfo> normalFilterCall = (filter, request) -> filter.filterRequestFirstChunkNoPayload(request, ctx);
BiFunction<RequestAndResponseFilter, RequestInfo, Pair<RequestInfo, Optional<ResponseInfo<?>>>> shortCircuitFilterCall = (filter, request) -> filter.filterRequestFirstChunkWithOptionalShortCircuitResponse(request, ctx);
return handleFilterLogic(ctx, msg, state, normalFilterCall, shortCircuitFilterCall);
}
if (msg instanceof LastHttpContent) {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
BiFunction<RequestAndResponseFilter, RequestInfo, RequestInfo> normalFilterCall = (filter, request) -> filter.filterRequestLastChunkWithFullPayload(request, ctx);
BiFunction<RequestAndResponseFilter, RequestInfo, Pair<RequestInfo, Optional<ResponseInfo<?>>>> shortCircuitFilterCall = (filter, request) -> filter.filterRequestLastChunkWithOptionalShortCircuitResponse(request, ctx);
return handleFilterLogic(ctx, msg, state, normalFilterCall, shortCircuitFilterCall);
}
// Not the first or last chunk. No filters were executed, so continue normally.
return PipelineContinuationBehavior.CONTINUE;
}
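A stripped-down sketch of the same first-chunk/last-chunk dispatch, with riposte's filter and state types replaced by a hypothetical RequestFilter interface, could look like this; intermediate chunks pass through untouched just as in the handler above.

import java.util.List;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;

public final class FirstAndLastChunkFilterHandler extends ChannelInboundHandlerAdapter {

    /** Hypothetical filter contract with separate first-chunk and last-chunk hooks. */
    public interface RequestFilter {
        void onFirstChunk(HttpRequest request, ChannelHandlerContext ctx);
        void onLastChunk(LastHttpContent lastChunk, ChannelHandlerContext ctx);
    }

    private final List<RequestFilter> filters;

    public FirstAndLastChunkFilterHandler(List<RequestFilter> filters) {
        this.filters = filters;
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        if (msg instanceof HttpRequest) {
            // First chunk: the request head (method, URI, headers) is available here.
            filters.forEach(f -> f.onFirstChunk((HttpRequest) msg, ctx));
        } else if (msg instanceof LastHttpContent) {
            // Last chunk: the payload has been fully received (plus optional trailers).
            filters.forEach(f -> f.onLastChunk((LastHttpContent) msg, ctx));
        }
        // Chunks in between are neither filtered nor consumed; pass everything along.
        ctx.fireChannelRead(msg);
    }
}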