Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.TooLongFrameException in project reactor-netty by reactor.
The class HttpServerOperations, method sendDecodingFailures.
static void sendDecodingFailures(ChannelHandlerContext ctx, ConnectionObserver listener, boolean secure, Throwable t, Object msg) {
    Connection conn = Connection.from(ctx.channel());
    Throwable cause = t.getCause() != null ? t.getCause() : t;
    if (log.isWarnEnabled()) {
        log.warn(format(ctx.channel(), "Decoding failed: " + msg + " : "), cause);
    }
    ReferenceCountUtil.release(msg);
    HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0,
            cause instanceof TooLongFrameException ? HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE : HttpResponseStatus.BAD_REQUEST);
    response.headers()
            .setInt(HttpHeaderNames.CONTENT_LENGTH, 0)
            .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
    ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
    HttpRequest request = null;
    if (msg instanceof HttpRequest) {
        request = (HttpRequest) msg;
    }
    listener.onStateChange(new FailedHttpServerRequest(conn, listener, request, response, secure), REQUEST_DECODING_FAILED);
}
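The status selection is the key detail here: a TooLongFrameException (for example from an HTTP decoder or aggregator exceeding its configured limit) maps to 413 Request Entity Too Large, while any other decoding failure maps to 400 Bad Request, and the connection is closed because the decoder state is no longer reliable. A minimal standalone sketch of the same pattern using plain io.netty types; the DecodingFailureHandler class is hypothetical and not part of reactor-netty:

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

// Hypothetical handler illustrating the same mapping: TooLongFrameException -> 413, anything else -> 400.
public class DecodingFailureHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        HttpResponseStatus status = cause instanceof TooLongFrameException
                ? HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE
                : HttpResponseStatus.BAD_REQUEST;
        FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status);
        response.headers()
                .setInt(HttpHeaderNames.CONTENT_LENGTH, 0)
                .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
        // Close the connection after flushing the error response; the decoder state is unreliable.
        ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
    }
}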
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.TooLongFrameException in project teiid by teiid.
The class ObjectDecoder, method decode.
@Override
protected Object decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
    if (result == null) {
        ByteBuf frame = null;
        try {
            frame = (ByteBuf) super.decode(ctx, buffer);
        } catch (TooLongFrameException e) {
            throw new IOException(RuntimePlugin.Util.gs(RuntimePlugin.Event.TEIID40166), e);
        }
        if (frame == null) {
            return null;
        }
        CompactObjectInputStream cois = new CompactObjectInputStream(new ByteBufInputStream(frame), classLoader);
        result = cois.readObject();
        streams = ExternalizeUtil.readList(cois, StreamFactoryReference.class);
        streamIndex = 0;
    }
    while (streamIndex < streams.size()) {
        // read the new chunk size
        if (streamDataToRead == -1) {
            if (buffer.readableBytes() < 2) {
                return null;
            }
            streamDataToRead = buffer.readUnsignedShort();
        }
        if (stream == null) {
            store = storageManager.createFileStore("temp-stream"); //$NON-NLS-1$
            StreamFactoryReference sfr = streams.get(streamIndex);
            sfr.setStreamFactory(new FileStoreInputStreamFactory(store, Streamable.ENCODING));
            this.stream = new BufferedOutputStream(store.createOutputStream());
        }
        // end of stream
        if (streamDataToRead == 0) {
            stream.close();
            stream = null;
            streamIndex++;
            streamDataToRead = -1;
            continue;
        }
        if (store.getLength() + streamDataToRead > maxLobSize) {
            if (error == null) {
                error = new StreamCorruptedException("lob too big: " + (store.getLength() + streamDataToRead) + " (max: " + maxLobSize + ')'); //$NON-NLS-1$ //$NON-NLS-2$
            }
        }
        int toRead = Math.min(buffer.readableBytes(), streamDataToRead);
        if (toRead == 0) {
            return null;
        }
        if (error == null) {
            buffer.readBytes(this.stream, toRead);
        } else {
            buffer.skipBytes(toRead);
        }
        streamDataToRead -= toRead;
        if (streamDataToRead == 0) {
            // get the next chunk
            streamDataToRead = -1;
        }
    }
    Object toReturn = result;
    result = null;
    streams = null;
    stream = null;
    store = null;
    if (error != null) {
        StreamCorruptedException sce = error;
        error = null;
        throw sce;
    }
    return toReturn;
}
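After the serialized object, the decoder reads each referenced lob stream as a sequence of length-prefixed chunks: an unsigned 16-bit size followed by that many payload bytes, with a zero-length chunk marking the end of the stream; a TooLongFrameException from the initial length-based frame is translated into a Teiid IOException. A hypothetical encoder-side helper (not Teiid code) sketching that chunk framing:

import io.netty.buffer.ByteBuf;

// Writes one stream in the chunked format the decoder above consumes.
public final class ChunkedStreamWriter {
    private static final int MAX_CHUNK = 0xFFFF; // largest value an unsigned short can hold

    public static void writeStream(ByteBuf out, byte[] data) {
        int offset = 0;
        while (offset < data.length) {
            int chunk = Math.min(MAX_CHUNK, data.length - offset);
            out.writeShort(chunk);               // chunk size, read back with readUnsignedShort()
            out.writeBytes(data, offset, chunk); // chunk payload
            offset += chunk;
        }
        out.writeShort(0); // zero-length chunk marks the end of this stream
    }
}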
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.TooLongFrameException in project iep by Netflix.
The class NetflixJsonObjectDecoder, method decode.
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (state == ST_CORRUPTED) {
        in.skipBytes(in.readableBytes());
        return;
    }
    if (LOGGER.isTraceEnabled()) {
        byte[] bytes = new byte[in.readableBytes()];
        in.getBytes(in.readerIndex(), bytes, 0, in.readableBytes());
        LOGGER.trace("starting [" + in.readerIndex() + ":" + in.readableBytes() + "]:" + new String(bytes));
    }
    // index of next byte to process.
    int len = this.len;
    int wrtIdx = in.writerIndex();
    for (; in.readerIndex() + len < wrtIdx; len++) {
        if (len > maxObjectLength) {
            // buffer size exceeded maxObjectLength; discarding the complete buffer.
            in.skipBytes(in.readableBytes());
            reset();
            throw new TooLongFrameException("object length exceeds " + maxObjectLength + ": " + len + " bytes discarded");
        }
        byte c = in.getByte(in.readerIndex() + len);
        if (state == ST_DECODING_NORMAL) {
            decodeByte(c, in, in.readerIndex() + len);
            // All opening braces/brackets have been closed, which means
            // that the JSON object/array is complete.
            if (openBraces == 0) {
                ByteBuf json = extractObject(ctx, in, in.readerIndex(), len + 1);
                if (json != null) {
                    out.add(json);
                }
                // The JSON object/array was extracted => discard the bytes from
                // the input buffer.
                in.readerIndex(in.readerIndex() + len + 1);
                len = 0;
                // Reset the object state to get ready for the next JSON object/text
                // coming along the byte stream.
                reset();
                break;
            }
        } else if (state == ST_DECODING_ARRAY_STREAM) {
            if (len == 0 && Character.isWhitespace(c)) {
                in.skipBytes(1);
                len--;
            }
            decodeByte(c, in, in.readerIndex() + len);
            if (!insideString && (openBraces == 1 && c == ',' || openBraces == 0 && c == ']')) {
                ByteBuf json = extractObject(ctx, in, in.readerIndex(), len);
                if (json != null) {
                    out.add(json);
                }
                in.readerIndex(in.readerIndex() + len + 1);
                len = 0;
                if (c == ']') {
                    reset();
                }
                break;
            }
        // JSON object/array detected. Accumulate bytes until all braces/brackets are closed.
        } else if (c == '{' || c == '[') {
            initDecoding(c);
            if (state == ST_DECODING_ARRAY_STREAM) {
                // Discard the array bracket
                in.skipBytes(1);
                len--;
            }
        // Discard leading spaces in front of a JSON object/array.
        } else if (Character.isWhitespace(c)) {
            in.skipBytes(1);
            len--;
        } else {
            state = ST_CORRUPTED;
            throw new CorruptedFrameException("invalid JSON received at byte position " + (in.readerIndex() + len) + ": " + ByteBufUtil.hexDump(in));
        }
    }
    this.len = len;
    if (LOGGER.isTraceEnabled()) {
        byte[] bytes = new byte[in.readableBytes()];
        in.getBytes(in.readerIndex(), bytes, 0, in.readableBytes());
        LOGGER.trace("remainder [" + in.readerIndex() + ":" + in.readableBytes() + "]:" + new String(bytes));
    }
}
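The maxObjectLength guard is what produces the TooLongFrameException: once the scan has advanced past the limit without closing all braces, the remaining input is discarded and the frame is rejected. The behavior can be illustrated with Netty's stock JsonObjectDecoder (which the Netflix class is derived from) and an EmbeddedChannel; the 16-byte limit is an arbitrary value chosen for the demo:

import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.handler.codec.json.JsonObjectDecoder;
import io.netty.util.CharsetUtil;

public class JsonDecoderDemo {
    public static void main(String[] args) {
        // An object longer than maxObjectLength surfaces as a TooLongFrameException.
        EmbeddedChannel ch = new EmbeddedChannel(new JsonObjectDecoder(16));
        try {
            ch.writeInbound(Unpooled.copiedBuffer("{\"key\":\"a value that is too long\"}", CharsetUtil.UTF_8));
        } catch (TooLongFrameException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}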
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.TooLongFrameException in project netty by netty.
The class WebSocket00FrameDecoder, method decodeBinaryFrame.
private WebSocketFrame decodeBinaryFrame(ChannelHandlerContext ctx, byte type, ByteBuf buffer) {
    long frameSize = 0;
    int lengthFieldSize = 0;
    byte b;
    do {
        b = buffer.readByte();
        frameSize <<= 7;
        frameSize |= b & 0x7f;
        if (frameSize > maxFrameSize) {
            throw new TooLongFrameException();
        }
        lengthFieldSize++;
        if (lengthFieldSize > 8) {
            // Perhaps a malicious peer?
            throw new TooLongFrameException();
        }
    } while ((b & 0x80) == 0x80);
    if (type == (byte) 0xFF && frameSize == 0) {
        receivedClosingHandshake = true;
        return new CloseWebSocketFrame(true, 0, ctx.alloc().buffer(0));
    }
    ByteBuf payload = readBytes(ctx.alloc(), buffer, (int) frameSize);
    return new BinaryWebSocketFrame(payload);
}
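The length loop reads the draft-00 (hixie-76) variable-length size: 7 bits of length per byte, most significant group first, with the high bit acting as a continuation flag, and the frame is rejected with TooLongFrameException as soon as the accumulated size exceeds maxFrameSize or the field runs past 8 bytes. A hypothetical encoder-side counterpart (not Netty code) for the same length format:

import io.netty.buffer.ByteBuf;

public final class Draft00Length {
    public static void writeLength(ByteBuf out, long length) {
        // Find the highest non-zero 7-bit group, then emit groups from most to least significant.
        int shift = 63; // highest multiple of 7 below 64
        while (shift > 0 && (length >>> shift & 0x7f) == 0) {
            shift -= 7;
        }
        for (; shift > 0; shift -= 7) {
            out.writeByte((int) (length >>> shift & 0x7f) | 0x80); // continuation bit set
        }
        out.writeByte((int) (length & 0x7f)); // final group, continuation bit clear
    }
}

For example, a 200-byte payload is encoded as the two bytes 0x81 0x48, which the loop above decodes back to (1 << 7) | 0x48 = 200.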
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.TooLongFrameException in project netty by netty.
The class WebSocket08FrameEncoder, method encode.
@Override
protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, List<Object> out) throws Exception {
    final ByteBuf data = msg.content();
    byte[] mask;
    byte opcode;
    if (msg instanceof TextWebSocketFrame) {
        opcode = OPCODE_TEXT;
    } else if (msg instanceof PingWebSocketFrame) {
        opcode = OPCODE_PING;
    } else if (msg instanceof PongWebSocketFrame) {
        opcode = OPCODE_PONG;
    } else if (msg instanceof CloseWebSocketFrame) {
        opcode = OPCODE_CLOSE;
    } else if (msg instanceof BinaryWebSocketFrame) {
        opcode = OPCODE_BINARY;
    } else if (msg instanceof ContinuationWebSocketFrame) {
        opcode = OPCODE_CONT;
    } else {
        throw new UnsupportedOperationException("Cannot encode frame of type: " + msg.getClass().getName());
    }
    int length = data.readableBytes();
    if (logger.isTraceEnabled()) {
        logger.trace("Encoding WebSocket Frame opCode={} length={}", opcode, length);
    }
    int b0 = 0;
    if (msg.isFinalFragment()) {
        b0 |= 1 << 7;
    }
    b0 |= msg.rsv() % 8 << 4;
    b0 |= opcode % 128;
    if (opcode == OPCODE_PING && length > 125) {
        throw new TooLongFrameException("invalid payload for PING (payload length must be <= 125, was " + length);
    }
    boolean release = true;
    ByteBuf buf = null;
    try {
        int maskLength = maskPayload ? 4 : 0;
        if (length <= 125) {
            int size = 2 + maskLength;
            if (maskPayload || length <= GATHERING_WRITE_THRESHOLD) {
                size += length;
            }
            buf = ctx.alloc().buffer(size);
            buf.writeByte(b0);
            byte b = (byte) (maskPayload ? 0x80 | (byte) length : (byte) length);
            buf.writeByte(b);
        } else if (length <= 0xFFFF) {
            int size = 4 + maskLength;
            if (maskPayload || length <= GATHERING_WRITE_THRESHOLD) {
                size += length;
            }
            buf = ctx.alloc().buffer(size);
            buf.writeByte(b0);
            buf.writeByte(maskPayload ? 0xFE : 126);
            buf.writeByte(length >>> 8 & 0xFF);
            buf.writeByte(length & 0xFF);
        } else {
            int size = 10 + maskLength;
            if (maskPayload || length <= GATHERING_WRITE_THRESHOLD) {
                size += length;
            }
            buf = ctx.alloc().buffer(size);
            buf.writeByte(b0);
            buf.writeByte(maskPayload ? 0xFF : 127);
            buf.writeLong(length);
        }
        // Write payload
        if (maskPayload) {
            int random = PlatformDependent.threadLocalRandom().nextInt(Integer.MAX_VALUE);
            mask = ByteBuffer.allocate(4).putInt(random).array();
            buf.writeBytes(mask);
            ByteOrder srcOrder = data.order();
            ByteOrder dstOrder = buf.order();
            int counter = 0;
            int i = data.readerIndex();
            int end = data.writerIndex();
            if (srcOrder == dstOrder) {
                // Use the optimized path only when byte orders match
                // Remark: & 0xFF is necessary because Java will do signed expansion from
                // byte to int which we don't want.
                int intMask = ((mask[0] & 0xFF) << 24) | ((mask[1] & 0xFF) << 16) | ((mask[2] & 0xFF) << 8) | (mask[3] & 0xFF);
                // If the byte order of our buffers is little endian we have to bring our mask
                // into the same format, because getInt() and writeInt() will use a reversed byte order
                if (srcOrder == ByteOrder.LITTLE_ENDIAN) {
                    intMask = Integer.reverseBytes(intMask);
                }
                for (; i + 3 < end; i += 4) {
                    int intData = data.getInt(i);
                    buf.writeInt(intData ^ intMask);
                }
            }
            for (; i < end; i++) {
                byte byteData = data.getByte(i);
                buf.writeByte(byteData ^ mask[counter++ % 4]);
            }
            out.add(buf);
        } else {
            if (buf.writableBytes() >= data.readableBytes()) {
                // merge buffers as this is cheaper than a gathering write if the payload is small enough
                buf.writeBytes(data);
                out.add(buf);
            } else {
                out.add(buf);
                out.add(data.retain());
            }
        }
        release = false;
    } finally {
        if (release && buf != null) {
            buf.release();
        }
    }
}
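The three allocation branches implement the RFC 6455 length encoding: payloads up to 125 bytes fit in the second header byte, payloads up to 0xFFFF use the marker 126 plus a 16-bit length, and larger payloads use 127 plus a 64-bit length; setting the mask bit (0x80) adds a four-byte masking key, and PING frames over 125 bytes are rejected with TooLongFrameException. A small standalone sketch (a hypothetical helper, not part of Netty) that mirrors the resulting header-size arithmetic:

public final class Ws08HeaderSize {
    public static int headerSize(long payloadLength, boolean masked) {
        int maskLength = masked ? 4 : 0;  // 4-byte masking key when the mask bit is set
        if (payloadLength <= 125) {
            return 2 + maskLength;        // b0 + length byte
        } else if (payloadLength <= 0xFFFF) {
            return 4 + maskLength;        // b0 + marker 126 + 16-bit length
        } else {
            return 10 + maskLength;       // b0 + marker 127 + 64-bit length
        }
    }

    public static void main(String[] args) {
        // e.g. a masked 200-byte client frame needs a 4 + 4 = 8 byte header
        System.out.println(headerSize(200, true));
    }
}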