Example 81 with CompositeByteBuf

use of io.netty.buffer.CompositeByteBuf in project spring-framework by spring-projects.

The class NettyDataBufferFactory, method join:

/**
 * {@inheritDoc}
 * <p>This implementation uses Netty's {@link CompositeByteBuf}.
 */
@Override
public DataBuffer join(List<? extends DataBuffer> dataBuffers) {
    Assert.notEmpty(dataBuffers, "DataBuffer List must not be empty");
    int bufferCount = dataBuffers.size();
    if (bufferCount == 1) {
        return dataBuffers.get(0);
    }
    CompositeByteBuf composite = this.byteBufAllocator.compositeBuffer(bufferCount);
    for (DataBuffer dataBuffer : dataBuffers) {
        Assert.isInstanceOf(NettyDataBuffer.class, dataBuffer);
        composite.addComponent(true, ((NettyDataBuffer) dataBuffer).getNativeBuffer());
    }
    return new NettyDataBuffer(composite, this);
}
Also used : CompositeByteBuf(io.netty.buffer.CompositeByteBuf)
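
For reference, a minimal standalone sketch (assumed; not part of the Spring source) of the addComponent(true, ...) behavior that join relies on: with true as the first argument, the composite advances its writerIndex for each component, and no bytes are copied.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class JoinSketch {
    public static void main(String[] args) {
        ByteBuf first = Unpooled.copiedBuffer(new byte[] { 1, 2, 3 });
        ByteBuf second = Unpooled.copiedBuffer(new byte[] { 4, 5, 6 });
        // true advances the composite's writerIndex per component,
        // which is why join(...) above never sets it manually.
        CompositeByteBuf composite = Unpooled.compositeBuffer(2)
                .addComponent(true, first)
                .addComponent(true, second);
        System.out.println(composite.readableBytes()); // 6, without copying any bytes
        composite.release(); // releasing the composite also releases both components
    }
}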

Example 82 with CompositeByteBuf

use of io.netty.buffer.CompositeByteBuf in project drill by apache.

The class ChunkCreationHandler, method encode:

@Override
protected void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("ChunkCreationHandler called with msg {} of size {} with chunkSize {}", msg, msg.readableBytes(), chunkSize);
    }
    if (!ctx.channel().isOpen()) {
        logger.debug("Channel closed, skipping encode inside {}.", RpcConstants.CHUNK_CREATION_HANDLER);
        msg.release();
        return;
    }
    // Calculate the number of chunks based on configured chunk size and input msg size
    int numChunks = (int) Math.ceil((double) msg.readableBytes() / chunkSize);
    // Initialize a composite buffer to hold numChunks chunks.
    final CompositeByteBuf cbb = ctx.alloc().compositeBuffer(numChunks);
    int cbbWriteIndex = 0;
    int currentChunkLen = min(msg.readableBytes(), chunkSize);
    // Create slices of chunkSize from the input msg and add them to the composite buffer.
    while (numChunks > 0) {
        final ByteBuf chunkBuf = msg.slice(msg.readerIndex(), currentChunkLen);
        chunkBuf.retain();
        cbb.addComponent(chunkBuf);
        cbbWriteIndex += currentChunkLen;
        msg.skipBytes(currentChunkLen);
        --numChunks;
        currentChunkLen = min(msg.readableBytes(), chunkSize);
    }
    // Update the writerIndex of the composite buffer. Netty doesn't do it automatically.
    cbb.writerIndex(cbbWriteIndex);
    // Add the final composite ByteBuf to the output list.
    out.add(cbb);
}
Also used : CompositeByteBuf(io.netty.buffer.CompositeByteBuf) ByteBuf(io.netty.buffer.ByteBuf)
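
A minimal standalone sketch (assumed; not Drill code) of the pattern above: slice() shares the source buffer's memory, so each chunk is retained before being added, and the composite's writerIndex is set manually because addComponent(ByteBuf) does not advance it.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class ChunkSketch {
    public static void main(String[] args) {
        int chunkSize = 4; // hypothetical chunk size
        ByteBuf msg = Unpooled.copiedBuffer(new byte[10]);
        CompositeByteBuf cbb = Unpooled.compositeBuffer();
        int written = 0;
        while (msg.isReadable()) {
            int len = Math.min(msg.readableBytes(), chunkSize);
            // retainedSlice = slice(...) + retain(), matching the code above
            cbb.addComponent(msg.retainedSlice(msg.readerIndex(), len));
            written += len;
            msg.skipBytes(len);
        }
        cbb.writerIndex(written); // addComponent(ByteBuf) leaves the writerIndex untouched
        System.out.println(cbb.readableBytes()); // 10
        cbb.release();
        msg.release();
    }
}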

Example 83 with CompositeByteBuf

use of io.netty.buffer.CompositeByteBuf in project drill by apache.

The class RpcEncoder, method encode:

@Override
protected void encode(ChannelHandlerContext ctx, OutboundRpcMessage msg, List<Object> out) throws Exception {
    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("Rpc Encoder called with msg {}", msg);
    }
    if (!ctx.channel().isOpen()) {
        // output.add(ctx.alloc().buffer(0));
        logger.debug("Channel closed, skipping encode.");
        msg.release();
        return;
    }
    try {
        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("Encoding outbound message {}", msg);
        }
        // first we build the RpcHeader
        RpcHeader header = RpcHeader.newBuilder()
                .setMode(msg.mode)
                .setCoordinationId(msg.coordinationId)
                .setRpcType(msg.rpcType)
                .build();
        // figure out the full length
        int headerLength = header.getSerializedSize();
        int protoBodyLength = msg.pBody.getSerializedSize();
        int rawBodyLength = msg.getRawBodySize();
        int fullLength = HEADER_TAG_LENGTH + getRawVarintSize(headerLength) + headerLength
                + PROTOBUF_BODY_TAG_LENGTH + getRawVarintSize(protoBodyLength) + protoBodyLength;
        if (rawBodyLength > 0) {
            fullLength += (RAW_BODY_TAG_LENGTH + getRawVarintSize(rawBodyLength) + rawBodyLength);
        }
        ByteBuf buf = ctx.alloc().buffer();
        OutputStream os = new ByteBufOutputStream(buf);
        CodedOutputStream cos = CodedOutputStream.newInstance(os);
        // write the full length first (this is a length-delimited stream).
        cos.writeRawVarint32(fullLength);
        // write header
        cos.writeRawVarint32(HEADER_TAG);
        cos.writeRawVarint32(headerLength);
        header.writeTo(cos);
        // write protobuf body length and body
        cos.writeRawVarint32(PROTOBUF_BODY_TAG);
        cos.writeRawVarint32(protoBodyLength);
        msg.pBody.writeTo(cos);
        // if a raw body exists, write its tag, length, and data.
        if (msg.getRawBodySize() > 0) {
            if (RpcConstants.EXTRA_DEBUGGING) {
                logger.debug("Writing raw body of size {}", msg.getRawBodySize());
            }
            cos.writeRawVarint32(RAW_BODY_TAG);
            cos.writeRawVarint32(rawBodyLength);
            // flush so the data bodies land after the header bytes in case cos is buffering.
            cos.flush();
            final CompositeByteBuf cbb = ctx.alloc().compositeBuffer(msg.dBodies.length + 1);
            cbb.addComponent(buf);
            int bufLength = buf.readableBytes();
            for (ByteBuf b : msg.dBodies) {
                cbb.addComponent(b);
                bufLength += b.readableBytes();
            }
            cbb.writerIndex(bufLength);
            out.add(cbb);
        } else {
            cos.flush();
            out.add(buf);
        }
        if (RpcConstants.SOME_DEBUGGING) {
            logger.debug("Wrote message length {}:{} bytes (head:body).  Message: " + msg, getRawVarintSize(fullLength), fullLength);
        }
        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("Sent message.  Ending writer index was {}.", buf.writerIndex());
        }
    } finally {
        // make sure to release the RPC message's underlying byte buffers.
        // msg.release();
    }
}
Also used : CompositeByteBuf(io.netty.buffer.CompositeByteBuf) RpcHeader(org.apache.drill.exec.proto.GeneralRPCProtos.RpcHeader) ByteBufOutputStream(io.netty.buffer.ByteBufOutputStream) OutputStream(java.io.OutputStream) CodedOutputStream(com.google.protobuf.CodedOutputStream) ByteBuf(io.netty.buffer.ByteBuf)
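
The central trick in RpcEncoder is worth isolating: serialize the small header into its own buffer, then stitch header and already-encoded data bodies together with a composite instead of copying the potentially large bodies. A minimal sketch (assumed; not Drill code), with a toy one-byte length prefix standing in for the real varint framing:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class FrameSketch {
    public static void main(String[] args) {
        ByteBuf body = Unpooled.copiedBuffer(new byte[100]); // stands in for msg.dBodies
        ByteBuf header = Unpooled.buffer();
        header.writeByte(body.readableBytes()); // toy length prefix, not a real varint
        CompositeByteBuf frame = Unpooled.compositeBuffer(2);
        frame.addComponent(header);
        frame.addComponent(body);
        // addComponent(ByteBuf) does not move the writerIndex, so set it explicitly,
        // exactly as the encoder does with cbb.writerIndex(bufLength).
        frame.writerIndex(header.readableBytes() + body.readableBytes());
        System.out.println(frame.readableBytes()); // 101 = 1 header byte + 100 body bytes
        frame.release();
    }
}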

Example 84 with CompositeByteBuf

use of io.netty.buffer.CompositeByteBuf in project drill by apache.

The class SaslEncryptionHandler, method encode:

public void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {
    if (!ctx.channel().isOpen()) {
        logger.debug("In " + RpcConstants.SASL_ENCRYPTION_HANDLER + " and channel is not open. " + "So releasing msg memory before encryption.");
        msg.release();
        return;
    }
    try {
        // If encryption is enabled, this handler will always receive a ByteBuf of type CompositeByteBuf
        assert (msg instanceof CompositeByteBuf);
        final CompositeByteBuf cbb = (CompositeByteBuf) msg;
        final int numComponents = cbb.numComponents();
        // Get all the components inside the Composite ByteBuf for encryption
        for (int currentIndex = 0; currentIndex < numComponents; ++currentIndex) {
            final ByteBuf component = cbb.component(currentIndex);
            // Each component must fit within wrapSizeLimit, since the upstream ChunkCreationHandler
            // breaks the RPC message into chunks of wrapSizeLimit.
            if (component.readableBytes() > wrapSizeLimit) {
                throw new RpcException(String.format("Component Chunk size: %d is greater than the wrapSizeLimit: %d", component.readableBytes(), wrapSizeLimit));
            }
            // Uncomment the code below if msg can contain both direct and heap ByteBufs. Currently Drill only
            // supports direct ByteBufs, so the condition would always be false. If the messages are always heap
            // ByteBufs, then in addition remove the allocation of origMsgBuffer from the constructor.
            /*
            if (component.hasArray()) {
                origMsg = component.array();
            } else {
                if (RpcConstants.EXTRA_DEBUGGING) {
                    logger.trace("The input bytebuf is not backed by a byte array so allocating a new one");
                }
            */
            final byte[] origMsg = origMsgBuffer;
            component.getBytes(component.readerIndex(), origMsg, 0, component.readableBytes());
            if (logger.isTraceEnabled()) {
                logger.trace("Trying to encrypt chunk of size:{} with wrapSizeLimit:{}", component.readableBytes(), wrapSizeLimit);
            }
            // Length to encrypt will be component length not origMsg length since that can be greater.
            final byte[] wrappedMsg = saslCodec.wrap(origMsg, 0, component.readableBytes());
            if (logger.isTraceEnabled()) {
                logger.trace("Successfully encrypted message, original size: {} Final Size: {}", component.readableBytes(), wrappedMsg.length);
            }
            // Allocate a direct buffer for the encrypted byte array plus 4 octets for the length of the
            // encrypted message. A direct buffer is preferred since, if the passed buffer is not in direct
            // memory, the channel copies it into a temporary direct buffer that gets cached on the thread;
            // the size of that temporary direct memory would be the size of the largest message sent.
            final ByteBuf encryptedBuf = ctx.alloc().buffer(wrappedMsg.length + RpcConstants.LENGTH_FIELD_LENGTH);
            // Per SASL RFCs 2222/4422, the first 4 octets must be the length of the encrypted buffer in
            // network byte order. The SASL framework provided by the JDK doesn't do this by default and
            // leaves it up to the application, whereas the Cyrus SASL implementation of sasl_encode does.
            lengthOctets.putInt(wrappedMsg.length);
            encryptedBuf.writeBytes(lengthOctets.array());
            // reset the position for re-use in next round
            lengthOctets.rewind();
            // Write the encrypted bytes inside the buffer
            encryptedBuf.writeBytes(wrappedMsg);
            // Update the msg and component reader index
            msg.skipBytes(component.readableBytes());
            component.skipBytes(component.readableBytes());
            // Add the encrypted buffer into the output to send it on wire.
            out.add(encryptedBuf);
        }
    } catch (OutOfMemoryException e) {
        logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
        msg.resetReaderIndex();
        outOfMemoryHandler.handle();
    } catch (IOException e) {
        logger.error("Something went wrong while wrapping the message: {} with MaxRawWrapSize: {}, " + "and error: {}", msg, wrapSizeLimit, e.getMessage());
        throw e;
    }
}
Also used : CompositeByteBuf(io.netty.buffer.CompositeByteBuf) IOException(java.io.IOException) ByteBuf(io.netty.buffer.ByteBuf) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException)
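
A minimal sketch (assumed; not Drill code) of the framing convention the handler implements: each encrypted chunk is preceded by its length as 4 octets in network byte order, which java.nio.ByteBuffer produces by default since it is big-endian unless configured otherwise.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;

public class SaslFrameSketch {
    public static void main(String[] args) {
        byte[] wrapped = { 10, 20, 30 }; // stands in for the saslCodec.wrap(...) output
        ByteBuffer lengthOctets = ByteBuffer.allocate(4); // big-endian (network byte order)
        lengthOctets.putInt(wrapped.length);
        ByteBuf frame = Unpooled.buffer(4 + wrapped.length);
        frame.writeBytes(lengthOctets.array());
        frame.writeBytes(wrapped);
        lengthOctets.rewind(); // ready for the next chunk, as in the handler above
        System.out.println(frame.readableBytes()); // 7 = 4 length octets + 3 payload bytes
        frame.release();
    }
}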

Example 85 with CompositeByteBuf

use of io.netty.buffer.CompositeByteBuf in project reactor-netty by reactor.

The class HttpServerPostFormTests, method doTestPostForm:

private void doTestPostForm(HttpServer server, HttpClient client, Consumer<HttpServerFormDecoderProvider.Builder> provider, boolean configOnServer, boolean multipart, boolean streaming, @Nullable String expectedResponse) throws Exception {
    AtomicReference<List<HttpData>> originalHttpData1 = new AtomicReference<>(new ArrayList<>());
    AtomicReference<List<HttpData>> originalHttpData2 = new AtomicReference<>(new ArrayList<>());
    AtomicReference<Map<String, CompositeByteBuf>> copiedHttpData = new AtomicReference<>(new HashMap<>());
    server = (configOnServer ? server.httpFormDecoder(provider) : server)
            .handle((req, res) -> res.sendString(
                    (configOnServer ? req.receiveForm() : req.receiveForm(provider)).flatMap(data -> {
        if ("0".equals(req.path())) {
            originalHttpData1.get().add(data);
            if (streaming) {
                CompositeByteBuf copy = copiedHttpData.get().computeIfAbsent(data.getName(), k -> Unpooled.compositeBuffer());
                try {
                    // In case of streaming this is not a blocking call
                    copy.writeBytes(data.get());
                } catch (IOException e) {
                    return Mono.error(e);
                }
            }
        } else {
            originalHttpData2.get().add(data);
        }
        return Mono.just('[' + data.getName() + ' ' + data.getClass().getSimpleName() + ' ' + data.isCompleted() + "] ");
    }).onErrorResume(t -> Mono.just(t.getCause().getMessage())).log()));
    disposableServer = server.bindNow();
    List<Tuple2<Integer, String>> responses;
    Path file = Paths.get(getClass().getResource("/largeFile1.txt").toURI());
    responses = Flux.range(0, 2)
            .flatMap(i -> client.port(disposableServer.port())
                    .post()
                    .uri("/" + i)
                    .sendForm((req, form) -> form.multipart(multipart)
                            .file("test1", "largeFile1.txt", file.toFile(), null)
                            .attr("attr1", "attr2")
                            .file("test2", "largeFile1.txt", file.toFile(), null))
                    .responseSingle((r, buf) -> buf.asString().map(s -> Tuples.of(r.status().code(), s))))
            .collectList()
            .block(Duration.ofSeconds(30));
    assertThat(responses).as("response").isNotNull();
    for (Tuple2<Integer, String> response : responses) {
        assertThat(response.getT1()).as("status code").isEqualTo(200);
        if (expectedResponse != null) {
            assertThat(response.getT2()).as("response body reflecting request").contains(expectedResponse);
        }
    }
    assertThat(originalHttpData1.get()).allMatch(data -> data.refCnt() == 0);
    assertThat(originalHttpData2.get()).allMatch(data -> data.refCnt() == 0);
    if (streaming) {
        if (expectedResponse == null) {
            assertThat(copiedHttpData.get()).hasSize(3);
            byte[] fileBytes = Files.readAllBytes(file);
            testContent(copiedHttpData.get().get("test1"), fileBytes);
            testContent(copiedHttpData.get().get("test2"), fileBytes);
            copiedHttpData.get().forEach((s, buffer) -> buffer.release());
        } else {
            List<HttpProtocol> serverProtocols = Arrays.asList(server.configuration().protocols());
            if (serverProtocols.size() == 1 && serverProtocols.get(0).equals(HttpProtocol.HTTP11)) {
                assertThat(copiedHttpData.get()).hasSize(1);
                copiedHttpData.get().forEach((s, buffer) -> buffer.release());
            } else {
                assertThat(copiedHttpData.get()).hasSize(0);
            }
        }
    }
}
Also used : Arrays(java.util.Arrays) HttpProtocol(reactor.netty.http.HttpProtocol) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Http2SslContextSpec(reactor.netty.http.Http2SslContextSpec) Tuples(reactor.util.function.Tuples) BaseHttpTest(reactor.netty.BaseHttpTest) Tuple2(reactor.util.function.Tuple2) HashMap(java.util.HashMap) Nullable(reactor.util.annotation.Nullable) AtomicReference(java.util.concurrent.atomic.AtomicReference) Unpooled(io.netty.buffer.Unpooled) Retention(java.lang.annotation.Retention) ArrayList(java.util.ArrayList) InsecureTrustManagerFactory(io.netty.handler.ssl.util.InsecureTrustManagerFactory) HttpData(io.netty.handler.codec.http.multipart.HttpData) Duration(java.time.Duration) Map(java.util.Map) Path(java.nio.file.Path) MethodSource(org.junit.jupiter.params.provider.MethodSource) Files(java.nio.file.Files) SelfSignedCertificate(io.netty.handler.ssl.util.SelfSignedCertificate) Mono(reactor.core.publisher.Mono) IOException(java.io.IOException) Target(java.lang.annotation.Target) ElementType(java.lang.annotation.ElementType) Consumer(java.util.function.Consumer) CompositeByteBuf(io.netty.buffer.CompositeByteBuf) Flux(reactor.core.publisher.Flux) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) List(java.util.List) Paths(java.nio.file.Paths) HttpClient(reactor.netty.http.client.HttpClient) RetentionPolicy(java.lang.annotation.RetentionPolicy)
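
A minimal sketch (assumed; not the reactor-netty test) of the accumulation step above: unlike addComponent, writeBytes copies each chunk into the composite and advances the writerIndex, so no manual index bookkeeping is needed.

import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

public class AccumulateSketch {
    public static void main(String[] args) {
        CompositeByteBuf copy = Unpooled.compositeBuffer();
        // Each call copies the chunk in and moves the writerIndex forward.
        copy.writeBytes("chunk-1 ".getBytes(StandardCharsets.UTF_8));
        copy.writeBytes("chunk-2".getBytes(StandardCharsets.UTF_8));
        System.out.println(copy.toString(StandardCharsets.UTF_8)); // chunk-1 chunk-2
        copy.release();
    }
}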

Aggregations

CompositeByteBuf (io.netty.buffer.CompositeByteBuf): 86
ByteBuf (io.netty.buffer.ByteBuf): 65
IOException (java.io.IOException): 11
ArrayList (java.util.ArrayList): 10
Test (org.junit.Test): 8
ByteBuffer (java.nio.ByteBuffer): 7
ChannelFuture (io.netty.channel.ChannelFuture): 6
Channel (io.netty.channel.Channel): 5
ChannelFutureListener (io.netty.channel.ChannelFutureListener): 5
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext): 5
EmbeddedChannel (io.netty.channel.embedded.EmbeddedChannel): 4
Test (org.junit.jupiter.api.Test): 4
ChannelInboundHandlerAdapter (io.netty.channel.ChannelInboundHandlerAdapter): 3
CodecException (io.netty.handler.codec.CodecException): 3
SelfSignedCertificate (io.netty.handler.ssl.util.SelfSignedCertificate): 3
InetSocketAddress (java.net.InetSocketAddress): 3
ClosedChannelException (java.nio.channels.ClosedChannelException): 3
List (java.util.List): 3
ExecutionException (java.util.concurrent.ExecutionException): 3
AtomicReference (java.util.concurrent.atomic.AtomicReference): 3