use of org.apache.flink.shaded.netty4.io.netty.buffer.CompositeByteBuf in project spring-framework by spring-projects.
the class NettyDataBufferFactory method join.
/**
* {@inheritDoc}
* <p>This implementation uses Netty's {@link CompositeByteBuf}.
*/
@Override
public DataBuffer join(List<? extends DataBuffer> dataBuffers) {
Assert.notEmpty(dataBuffers, "DataBuffer List must not be empty");
int bufferCount = dataBuffers.size();
if (bufferCount == 1) {
return dataBuffers.get(0);
}
CompositeByteBuf composite = this.byteBufAllocator.compositeBuffer(bufferCount);
for (DataBuffer dataBuffer : dataBuffers) {
Assert.isInstanceOf(NettyDataBuffer.class, dataBuffer);
composite.addComponent(true, ((NettyDataBuffer) dataBuffer).getNativeBuffer());
}
return new NettyDataBuffer(composite, this);
}
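The join implementation above stitches the given buffers together with a CompositeByteBuf instead of copying them into one contiguous buffer. Below is a minimal usage sketch; the allocator choice, the string contents and the JoinSketch class are illustrative assumptions, not taken from the Spring sources.

import io.netty.buffer.ByteBufAllocator;
import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.core.io.buffer.DataBufferUtils;
import org.springframework.core.io.buffer.NettyDataBufferFactory;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

public class JoinSketch {
    public static void main(String[] args) {
        // Hypothetical setup: wrap Netty's default allocator in a factory.
        NettyDataBufferFactory factory = new NettyDataBufferFactory(ByteBufAllocator.DEFAULT);

        DataBuffer first = factory.wrap("Hello, ".getBytes(StandardCharsets.UTF_8));
        DataBuffer second = factory.wrap("world".getBytes(StandardCharsets.UTF_8));

        // join() backs the result with a CompositeByteBuf, so the two buffers
        // are presented as one DataBuffer without copying their bytes.
        List<DataBuffer> parts = Arrays.asList(first, second);
        DataBuffer joined = factory.join(parts);

        byte[] result = new byte[joined.readableByteCount()];
        joined.read(result);
        System.out.println(new String(result, StandardCharsets.UTF_8)); // "Hello, world"

        DataBufferUtils.release(joined);
    }
}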
use of org.apache.flink.shaded.netty4.io.netty.buffer.CompositeByteBuf in project drill by apache.
the class ChunkCreationHandler method encode.
@Override
protected void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("ChunkCreationHandler called with msg {} of size {} with chunkSize {}", msg, msg.readableBytes(), chunkSize);
}
if (!ctx.channel().isOpen()) {
logger.debug("Channel closed, skipping encode inside {}.", RpcConstants.CHUNK_CREATION_HANDLER);
msg.release();
return;
}
// Calculate the number of chunks based on configured chunk size and input msg size
int numChunks = (int) Math.ceil((double) msg.readableBytes() / chunkSize);
// Initialize a composite buffer to hold numChunks chunks.
final CompositeByteBuf cbb = ctx.alloc().compositeBuffer(numChunks);
int cbbWriteIndex = 0;
int currentChunkLen = min(msg.readableBytes(), chunkSize);
// Create slices of chunkSize from the input msg and add them to the composite buffer.
while (numChunks > 0) {
final ByteBuf chunkBuf = msg.slice(msg.readerIndex(), currentChunkLen);
chunkBuf.retain();
cbb.addComponent(chunkBuf);
cbbWriteIndex += currentChunkLen;
msg.skipBytes(currentChunkLen);
--numChunks;
currentChunkLen = min(msg.readableBytes(), chunkSize);
}
// Update the writerIndex of composite byte buffer. Netty doesn't do it automatically.
cbb.writerIndex(cbbWriteIndex);
// Add the final composite bytebuf into output buffer.
out.add(cbb);
}
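The handler retains each slice because slice() shares the underlying memory of msg, and it sets the composite buffer's writer index by hand because addComponent() does not advance it. A self-contained sketch of the same slicing pattern outside a Netty pipeline follows; the chunk size, payload bytes and ChunkSliceSketch class are assumptions for illustration.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class ChunkSliceSketch {
    public static void main(String[] args) {
        int chunkSize = 4; // assumed chunk size for the example
        ByteBuf msg = Unpooled.copiedBuffer(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10});

        int numChunks = (int) Math.ceil((double) msg.readableBytes() / chunkSize);
        CompositeByteBuf cbb = Unpooled.compositeBuffer(numChunks);

        int writeIndex = 0;
        while (msg.isReadable()) {
            int len = Math.min(msg.readableBytes(), chunkSize);
            // slice() shares memory with msg, so retain it before handing it to the
            // composite buffer, then advance msg's reader index past the slice.
            ByteBuf chunk = msg.slice(msg.readerIndex(), len).retain();
            cbb.addComponent(chunk);
            writeIndex += len;
            msg.skipBytes(len);
        }
        // addComponent(buf) does not advance the writer index, so set it explicitly.
        cbb.writerIndex(writeIndex);

        System.out.println(cbb.readableBytes()); // 10
        cbb.release();
        msg.release();
    }
}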
use of org.apache.flink.shaded.netty4.io.netty.buffer.CompositeByteBuf in project drill by apache.
the class RpcEncoder method encode.
@Override
protected void encode(ChannelHandlerContext ctx, OutboundRpcMessage msg, List<Object> out) throws Exception {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Rpc Encoder called with msg {}", msg);
}
if (!ctx.channel().isOpen()) {
// output.add(ctx.alloc().buffer(0));
logger.debug("Channel closed, skipping encode.");
msg.release();
return;
}
try {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Encoding outbound message {}", msg);
}
// first we build the RpcHeader
RpcHeader header = RpcHeader.newBuilder()
    .setMode(msg.mode)
    .setCoordinationId(msg.coordinationId)
    .setRpcType(msg.rpcType)
    .build();
// figure out the full length
int headerLength = header.getSerializedSize();
int protoBodyLength = msg.pBody.getSerializedSize();
int rawBodyLength = msg.getRawBodySize();
int fullLength = HEADER_TAG_LENGTH + getRawVarintSize(headerLength) + headerLength
    + PROTOBUF_BODY_TAG_LENGTH + getRawVarintSize(protoBodyLength) + protoBodyLength;
if (rawBodyLength > 0) {
fullLength += (RAW_BODY_TAG_LENGTH + getRawVarintSize(rawBodyLength) + rawBodyLength);
}
ByteBuf buf = ctx.alloc().buffer();
OutputStream os = new ByteBufOutputStream(buf);
CodedOutputStream cos = CodedOutputStream.newInstance(os);
// write full length first (this is length delimited stream).
cos.writeRawVarint32(fullLength);
// write header
cos.writeRawVarint32(HEADER_TAG);
cos.writeRawVarint32(headerLength);
header.writeTo(cos);
// write protobuf body length and body
cos.writeRawVarint32(PROTOBUF_BODY_TAG);
cos.writeRawVarint32(protoBodyLength);
msg.pBody.writeTo(cos);
// if a raw body exists, write its tag, length and data.
if (msg.getRawBodySize() > 0) {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Writing raw body of size {}", msg.getRawBodySize());
}
cos.writeRawVarint32(RAW_BODY_TAG);
cos.writeRawVarint32(rawBodyLength);
// flush so that the raw body is written after the header and proto body, in case cos is buffering.
cos.flush();
final CompositeByteBuf cbb = ctx.alloc().compositeBuffer(msg.dBodies.length + 1);
cbb.addComponent(buf);
int bufLength = buf.readableBytes();
for (ByteBuf b : msg.dBodies) {
cbb.addComponent(b);
bufLength += b.readableBytes();
}
cbb.writerIndex(bufLength);
out.add(cbb);
} else {
cos.flush();
out.add(buf);
}
if (RpcConstants.SOME_DEBUGGING) {
logger.debug("Wrote message length {}:{} bytes (head:body). Message: " + msg, getRawVarintSize(fullLength), fullLength);
}
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Sent message. Ending writer index was {}.", buf.writerIndex());
}
} finally {
// make sure to release the RpcMessage's underlying byte buffers.
// msg.release();
}
}
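The encoder writes a varint-delimited frame (the full length, then the tagged header, proto body and optional raw body) and, when raw bodies are present, avoids copying them by appending them behind the serialized header in a CompositeByteBuf. A much-simplified, self-contained sketch of that framing idea follows; the varint helper, the payload bytes and the FramingSketch class are assumptions for illustration, and no protobuf types are involved.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class FramingSketch {
    // Minimal unsigned varint32 writer, mirroring what CodedOutputStream.writeRawVarint32 does.
    static void writeRawVarint32(ByteBuf buf, int value) {
        while ((value & ~0x7F) != 0) {
            buf.writeByte((value & 0x7F) | 0x80);
            value >>>= 7;
        }
        buf.writeByte(value);
    }

    public static void main(String[] args) {
        byte[] body = "raw body bytes".getBytes(); // assumed payload

        // Header buffer: just the varint-encoded body length in this sketch.
        ByteBuf header = Unpooled.buffer();
        writeRawVarint32(header, body.length);

        ByteBuf rawBody = Unpooled.wrappedBuffer(body);

        // Stitch header and body together without copying, as RpcEncoder does for
        // its dBodies. The composite's writer index must be set manually.
        CompositeByteBuf frame = Unpooled.compositeBuffer(2);
        frame.addComponent(header);
        frame.addComponent(rawBody);
        frame.writerIndex(header.readableBytes() + rawBody.readableBytes());

        System.out.println(frame.readableBytes()); // 1 varint byte + 14 body bytes
        frame.release();
    }
}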
use of org.apache.flink.shaded.netty4.io.netty.buffer.CompositeByteBuf in project drill by apache.
the class SaslEncryptionHandler method encode.
public void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {
if (!ctx.channel().isOpen()) {
logger.debug("In " + RpcConstants.SASL_ENCRYPTION_HANDLER + " and channel is not open. " + "So releasing msg memory before encryption.");
msg.release();
return;
}
try {
// If encryption is enabled then this handler will always get a ByteBuf of type CompositeByteBuf
assert (msg instanceof CompositeByteBuf);
final CompositeByteBuf cbb = (CompositeByteBuf) msg;
final int numComponents = cbb.numComponents();
// Get all the components inside the Composite ByteBuf for encryption
for (int currentIndex = 0; currentIndex < numComponents; ++currentIndex) {
final ByteBuf component = cbb.component(currentIndex);
// will break the RPC message into chunks of wrapSizeLimit.
if (component.readableBytes() > wrapSizeLimit) {
throw new RpcException(String.format("Component Chunk size: %d is greater than the wrapSizeLimit: %d", component.readableBytes(), wrapSizeLimit));
}
// Uncomment the code below if msg can contain both direct and heap ByteBufs. Currently Drill only supports
// DirectByteBuf, so the condition below will always be false. If the msgs are always HeapByteBufs, then
// also remove the allocation of origMsgBuffer from the constructor.
/*if (component.hasArray()) {
origMsg = component.array();
} else {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.trace("The input bytebuf is not backed by a byte array so allocating a new one");
}*/
final byte[] origMsg = origMsgBuffer;
component.getBytes(component.readerIndex(), origMsg, 0, component.readableBytes());
if (logger.isTraceEnabled()) {
logger.trace("Trying to encrypt chunk of size:{} with wrapSizeLimit:{}", component.readableBytes(), wrapSizeLimit);
}
// Length to encrypt will be component length not origMsg length since that can be greater.
final byte[] wrappedMsg = saslCodec.wrap(origMsg, 0, component.readableBytes());
if (logger.isTraceEnabled()) {
logger.trace("Successfully encrypted message, original size: {} Final Size: {}", component.readableBytes(), wrappedMsg.length);
}
// Allocate a (direct) buffer to hold the encrypted byte array plus 4 octets for the length of the
// encrypted message. This is preferred because if the passed buffer is not in direct memory, the channel
// will copy it into a temporary direct buffer that is cached per thread, and the size of that temporary
// buffer ends up being the size of the largest message sent.
final ByteBuf encryptedBuf = ctx.alloc().buffer(wrappedMsg.length + RpcConstants.LENGTH_FIELD_LENGTH);
// Per SASL RFCs 2222/4422, the first 4 octets must carry the length of the encrypted buffer in network
// byte order. The SASL framework provided by the JDK doesn't do this by default and leaves it up to the
// application, whereas the Cyrus SASL implementation of sasl_encode does take care of it.
lengthOctets.putInt(wrappedMsg.length);
encryptedBuf.writeBytes(lengthOctets.array());
// reset the position for re-use in next round
lengthOctets.rewind();
// Write the encrypted bytes inside the buffer
encryptedBuf.writeBytes(wrappedMsg);
// Update the msg and component reader index
msg.skipBytes(component.readableBytes());
component.skipBytes(component.readableBytes());
// Add the encrypted buffer into the output to send it on wire.
out.add(encryptedBuf);
}
} catch (OutOfMemoryException e) {
logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
msg.resetReaderIndex();
outOfMemoryHandler.handle();
} catch (IOException e) {
logger.error("Something went wrong while wrapping the message: {} with MaxRawWrapSize: {}, " + "and error: {}", msg, wrapSizeLimit, e.getMessage());
throw e;
}
}
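For each component the handler prepends a 4-octet, network-byte-order length before the SASL-wrapped bytes, since the JDK SASL framework leaves that framing to the application. The sketch below shows only that length-prefix framing; the wrap() stand-in (an identity copy in place of saslCodec.wrap), the payload and the LengthPrefixSketch class are assumptions for illustration.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.nio.ByteBuffer;

public class LengthPrefixSketch {
    // Stand-in for saslCodec.wrap(); a real handler would produce ciphertext here.
    static byte[] wrap(byte[] input, int offset, int len) {
        byte[] out = new byte[len];
        System.arraycopy(input, offset, out, 0, len);
        return out;
    }

    public static void main(String[] args) {
        byte[] chunk = "one rpc chunk".getBytes(); // assumed plaintext chunk
        byte[] wrappedMsg = wrap(chunk, 0, chunk.length);

        // 4-octet big-endian (network byte order) length prefix, as the SASL framing expects.
        ByteBuffer lengthOctets = ByteBuffer.allocate(4);
        lengthOctets.putInt(wrappedMsg.length);

        ByteBuf encryptedBuf = Unpooled.buffer(wrappedMsg.length + 4);
        encryptedBuf.writeBytes(lengthOctets.array());
        encryptedBuf.writeBytes(wrappedMsg);

        System.out.println(encryptedBuf.readableBytes()); // 4 + wrappedMsg.length
        encryptedBuf.release();
    }
}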
use of org.apache.flink.shaded.netty4.io.netty.buffer.CompositeByteBuf in project reactor-netty by reactor.
the class HttpServerPostFormTests method doTestPostForm.
private void doTestPostForm(HttpServer server, HttpClient client, Consumer<HttpServerFormDecoderProvider.Builder> provider, boolean configOnServer, boolean multipart, boolean streaming, @Nullable String expectedResponse) throws Exception {
AtomicReference<List<HttpData>> originalHttpData1 = new AtomicReference<>(new ArrayList<>());
AtomicReference<List<HttpData>> originalHttpData2 = new AtomicReference<>(new ArrayList<>());
AtomicReference<Map<String, CompositeByteBuf>> copiedHttpData = new AtomicReference<>(new HashMap<>());
server = (configOnServer ? server.httpFormDecoder(provider) : server)
    .handle((req, res) -> res.sendString((configOnServer ? req.receiveForm() : req.receiveForm(provider))
        .flatMap(data -> {
if ("0".equals(req.path())) {
originalHttpData1.get().add(data);
if (streaming) {
CompositeByteBuf copy = copiedHttpData.get().computeIfAbsent(data.getName(), k -> Unpooled.compositeBuffer());
try {
// In case of streaming this is not a blocking call
copy.writeBytes(data.get());
} catch (IOException e) {
return Mono.error(e);
}
}
} else {
originalHttpData2.get().add(data);
}
return Mono.just('[' + data.getName() + ' ' + data.getClass().getSimpleName() + ' ' + data.isCompleted() + "] ");
}).onErrorResume(t -> Mono.just(t.getCause().getMessage())).log()));
disposableServer = server.bindNow();
List<Tuple2<Integer, String>> responses;
Path file = Paths.get(getClass().getResource("/largeFile1.txt").toURI());
responses = Flux.range(0, 2)
    .flatMap(i -> client.port(disposableServer.port())
        .post()
        .uri("/" + i)
        .sendForm((req, form) -> form.multipart(multipart)
            .file("test1", "largeFile1.txt", file.toFile(), null)
            .attr("attr1", "attr2")
            .file("test2", "largeFile1.txt", file.toFile(), null))
        .responseSingle((r, buf) -> buf.asString().map(s -> Tuples.of(r.status().code(), s))))
    .collectList()
    .block(Duration.ofSeconds(30));
assertThat(responses).as("response").isNotNull();
for (Tuple2<Integer, String> response : responses) {
assertThat(response.getT1()).as("status code").isEqualTo(200);
if (expectedResponse != null) {
assertThat(response.getT2()).as("response body reflecting request").contains(expectedResponse);
}
}
assertThat(originalHttpData1.get()).allMatch(data -> data.refCnt() == 0);
assertThat(originalHttpData2.get()).allMatch(data -> data.refCnt() == 0);
if (streaming) {
if (expectedResponse == null) {
assertThat(copiedHttpData.get()).hasSize(3);
byte[] fileBytes = Files.readAllBytes(file);
testContent(copiedHttpData.get().get("test1"), fileBytes);
testContent(copiedHttpData.get().get("test2"), fileBytes);
copiedHttpData.get().forEach((s, buffer) -> buffer.release());
} else {
List<HttpProtocol> serverProtocols = Arrays.asList(server.configuration().protocols());
if (serverProtocols.size() == 1 && serverProtocols.get(0).equals(HttpProtocol.HTTP11)) {
assertThat(copiedHttpData.get()).hasSize(1);
copiedHttpData.get().forEach((s, buffer) -> buffer.release());
} else {
assertThat(copiedHttpData.get()).hasSize(0);
}
}
}
}
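In the streaming case the test accumulates each part's content into an Unpooled composite buffer keyed by part name and releases it after verification. A minimal sketch of that accumulation pattern follows; the chunk strings and the AccumulateSketch class are assumptions for illustration.

import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

import java.nio.charset.StandardCharsets;

public class AccumulateSketch {
    public static void main(String[] args) {
        // Unbounded composite used as a growable accumulator, like copiedHttpData above.
        CompositeByteBuf copy = Unpooled.compositeBuffer();

        // Each writeBytes call copies the incoming chunk into the composite, adding
        // components as needed; in the test these chunks come from streamed HttpData parts.
        copy.writeBytes("chunk-1 ".getBytes(StandardCharsets.UTF_8));
        copy.writeBytes("chunk-2".getBytes(StandardCharsets.UTF_8));

        System.out.println(copy.toString(StandardCharsets.UTF_8)); // "chunk-1 chunk-2"
        copy.release();
    }
}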