Search in sources :

Example 41 with DataChunk

use of io.helidon.common.http.DataChunk in project helidon by oracle.

Source: the read method of the class DataChunkInputStream.

/**
 * Reads up to {@code len} bytes into {@code buf}, blocking until the current
 * chunk future completes. Returns the number of bytes copied, or {@code -1}
 * at end of stream (signalled by a {@code null} chunk).
 *
 * NOTE(review): if a chunk arrives whose buffers are all empty, this can
 * return 0 for a positive {@code len}, which InputStream discourages — verify
 * upstream never emits empty chunks.
 */
@Override
public int read(byte[] buf, int off, int len) throws IOException {
    // Lazily subscribe on the first read; the CAS guarantees the upstream
    // publisher is subscribed exactly once even under concurrent readers.
    if (subscribed.compareAndSet(false, true)) {
        // subscribe for first time
        originalPublisher.subscribe(new DataChunkSubscriber());
    }
    // 'current' is presumably nulled by close() — a null here means the
    // stream was closed before/while reading.
    if (current == null) {
        throw new IOException("The input stream has been closed");
    }
    try {
        // block until data is available
        DataChunk chunk = current.get();
        // null chunk is the end-of-stream marker from the subscriber
        if (chunk == null) {
            return -1;
        }
        ByteBuffer[] currentBuffers = chunk.data();
        int count = 0;
        while (bufferIndex < currentBuffers.length) {
            // Log once per chunk: only before the first byte of the first
            // buffer has been consumed.
            if (bufferIndex == 0 && currentBuffers[bufferIndex].position() == 0) {
                LOGGER.finest(() -> "Reading chunk ID: " + chunk.id());
            }
            int rem = currentBuffers[bufferIndex].remaining();
            // Copy at most 'len' bytes, capped by what this buffer holds.
            int blen = len;
            if (blen > rem) {
                blen = rem;
            }
            currentBuffers[bufferIndex].get(buf, off, blen);
            off += blen;
            count += blen;
            len -= blen;
            // Caller's buffer is full while this ByteBuffer still has data:
            // leave it partially consumed for the next read() call.
            if (rem > blen) {
                break;
            }
            // This buffer is fully drained; decide between advancing to the
            // next buffer or finishing the chunk.
            if (bufferIndex == currentBuffers.length - 1) {
                // Last buffer of the chunk: release it, swap in the future
                // for the next chunk, and ask upstream for one more.
                releaseChunk(chunk, null);
                current = next;
                bufferIndex = 0;
                subscription.request(1);
                break;
            }
            bufferIndex++;
        }
        return count;
    } catch (InterruptedException e) {
        // Restore the interrupt flag before surfacing as an IOException.
        Thread.currentThread().interrupt();
        throw new IOException(e);
    } catch (ExecutionException e) {
        // Unwrap so callers see the real upstream failure as the cause.
        throw new IOException(e.getCause());
    }
}
Also used : DataChunk(io.helidon.common.http.DataChunk) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ByteBuffer(java.nio.ByteBuffer)

Example 42 with DataChunk

use of io.helidon.common.http.DataChunk in project helidon by oracle.

Source: the writeStatusAndHeaders method of the class BareResponseImpl.

/**
 * Writes the HTTP status line and headers exactly once, deciding on
 * transfer-encoding, WebSocket upgrade handling and connection keep-alive.
 * The actual network write may be deferred until entity analysis completes.
 *
 * @param status  response status, must not be {@code null}
 * @param headers response headers to copy onto the Netty response
 * @throws IllegalStateException if status and headers were already sent
 */
@Override
public void writeStatusAndHeaders(Http.ResponseStatus status, Map<String, List<String>> headers) {
    Objects.requireNonNull(status, "Parameter 'statusCode' was null!");
    // Guard against double invocation: the CAS flips once, ever.
    if (!statusHeadersSent.compareAndSet(false, true)) {
        throw new IllegalStateException("Status and headers were already sent");
    }
    HttpResponseStatus nettyStatus;
    if (status instanceof Http.Status || status.reasonPhrase() == null) {
        // default reason phrase
        nettyStatus = valueOf(status.code());
    } else {
        // custom reason phrase
        nettyStatus = valueOf(status.code(), status.reasonPhrase());
    }
    response = new DefaultHttpResponse(HTTP_1_1, nettyStatus);
    for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
        response.headers().add(headerEntry.getKey(), headerEntry.getValue());
    }
    // Copy HTTP/2 headers to response for correlation (streamId)
    requestHeaders.names().stream().filter(header -> header.startsWith(HTTP_2_HEADER_PREFIX)).forEach(header -> response.headers().add(header, requestHeaders.get(header)));
    // Check if WebSocket upgrade
    boolean isUpgrade = isWebSocketUpgrade(status, headers);
    if (isUpgrade) {
        isWebSocketUpgrade = true;
    } else {
        // Set chunked if length not set, may switch to length later
        boolean lengthSet = HttpUtil.isContentLengthSet(response);
        if (!lengthSet) {
            // Length optimization: for a plain 200 that is not already chunked
            // and not an SSE stream, we may later replace chunked encoding
            // with an explicit Content-Length.
            lengthOptimization = status.code() == Http.Status.OK_200.code() && !HttpUtil.isTransferEncodingChunked(response) && !isSseEventStream(headers);
            HttpUtil.setTransferEncodingChunked(response, true);
        }
    }
    // if response Connection header is set explicitly to close, we can ignore the following
    if (!keepAlive || HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(response.headers().get(HttpHeaderNames.CONNECTION))) {
        // Connection will be closed after the response; drop the header and
        // arrange the channel to close when the write completes.
        response.headers().remove(HttpHeaderNames.CONNECTION);
        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
    } else {
        if (!requestContext.requestCompleted()) {
            LOGGER.finer(() -> log("Request content not fully read with keep-alive: true", channel));
            if (!isWebSocketUpgrade) {
                if (requestContext.isDataRequested()) {
                    // there are pending requests, we have emitted some data and request was not explicitly canceled
                    // this is a bug in code, where entity is requested and not fully processed
                    // throwing an exception here is a breaking change (also this may be an intermittent problem
                    // as it may depend on thread race)
                    HttpRequest request = requestContext.request();
                    LOGGER.warning("Entity was requested and not fully consumed before a response is sent. " + "This is not supported. Connection will be closed. Please fix your route for " + request.method() + " " + request.uri());
                    // let's close this connection, as it is in an unexpected state
                    response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
                    originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
                } else {
                    // we want to consume the entity and keep alive
                    // entity must be consumed here, so we do not close connection in forwarding handler
                    // because of unconsumed payload (the following code will only succeed if there is no subscriber)
                    requestContext.publisher().forEach(DataChunk::release).onComplete(() -> {
                        response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
                        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE_ON_FAILURE);
                    }).onError(t -> {
                        // Draining the entity failed; fall back to closing.
                        response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
                        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
                    }).ignoreElement();
                }
            }
        } else if (!headers.containsKey(HttpHeaderNames.CONNECTION.toString())) {
            // Request fully read and no explicit Connection header: keep alive.
            response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
        }
    }
    // Content length optimization attempt
    if (!lengthOptimization) {
        // No optimization possible: write the status line and headers as soon
        // as the entity analysis pipeline reaches this stage.
        requestEntityAnalyzed = requestEntityAnalyzed.thenApply(listener -> {
            LOGGER.fine(() -> log("Writing headers %s", status));
            requestContext.runInScope(() -> orderedWrite(this::initWriteResponse));
            return listener;
        });
    }
}
Also used : Arrays(java.util.Arrays) HttpHeaders(io.netty.handler.codec.http.HttpHeaders) DataChunk(io.helidon.common.http.DataChunk) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) Level(java.util.logging.Level) Unpooled(io.netty.buffer.Unpooled) LastHttpContent(io.netty.handler.codec.http.LastHttpContent) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) ByteBuf(io.netty.buffer.ByteBuf) Flow(java.util.concurrent.Flow) Map(java.util.Map) ChannelFutureListener(io.netty.channel.ChannelFutureListener) Single(io.helidon.common.reactive.Single) HTTP_1_1(io.netty.handler.codec.http.HttpVersion.HTTP_1_1) Http(io.helidon.common.http.Http) HttpResponseStatus.valueOf(io.netty.handler.codec.http.HttpResponseStatus.valueOf) HttpRequest(io.netty.handler.codec.http.HttpRequest) HttpHeaderValues(io.netty.handler.codec.http.HttpHeaderValues) GenericFutureListener(io.netty.util.concurrent.GenericFutureListener) HttpResponseStatus(io.netty.handler.codec.http.HttpResponseStatus) Logger(java.util.logging.Logger) ChannelFuture(io.netty.channel.ChannelFuture) Objects(java.util.Objects) DefaultHttpContent(io.netty.handler.codec.http.DefaultHttpContent) List(java.util.List) DefaultLastHttpContent(io.netty.handler.codec.http.DefaultLastHttpContent) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) HttpHeaderNames(io.netty.handler.codec.http.HttpHeaderNames) Future(io.netty.util.concurrent.Future) HttpUtil(io.netty.handler.codec.http.HttpUtil) HttpRequest(io.netty.handler.codec.http.HttpRequest) HttpResponseStatus(io.netty.handler.codec.http.HttpResponseStatus) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) Http(io.helidon.common.http.Http) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)

Example 43 with DataChunk

use of io.helidon.common.http.DataChunk in project helidon by oracle.

Source: the create method of the class MediaPublisher.

/**
 * Creates a publisher that emits the given character sequence as a single
 * {@link DataChunk}.
 *
 * @param publishedType a type. If it contains a charset then it is used, otherwise {@code UTF-8}. If {@code null} then
 *                     {@code text/plain} is used as a default.
 * @param charSequence A sequence to publish.
 * @return new publisher.
 */
static MediaPublisher create(MediaType publishedType, CharSequence charSequence) {
    // Resolve the charset from the media type, falling back to UTF-8.
    Charset charset = Optional.ofNullable(publishedType)
            .flatMap(MediaType::charset)
            .map(Charset::forName)
            .orElse(StandardCharsets.UTF_8);
    ByteBuffer encoded = charset.encode(charSequence.toString());
    Flow.Publisher<DataChunk> delegate = Multi.singleton(DataChunk.create(encoded));
    return new MediaPublisher() {

        @Override
        public MediaType mediaType() {
            return publishedType;
        }

        @Override
        public void subscribe(Flow.Subscriber<? super DataChunk> subscriber) {
            delegate.subscribe(subscriber);
        }
    };
}
Also used : Charset(java.nio.charset.Charset) DataChunk(io.helidon.common.http.DataChunk) ByteBuffer(java.nio.ByteBuffer) Flow(java.util.concurrent.Flow)

Aggregations

DataChunk (io.helidon.common.http.DataChunk)43 Test (org.junit.jupiter.api.Test)18 Multi (io.helidon.common.reactive.Multi)12 Flow (java.util.concurrent.Flow)11 GenericType (io.helidon.common.GenericType)9 CompletableFuture (java.util.concurrent.CompletableFuture)9 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)9 Http (io.helidon.common.http.Http)8 Single (io.helidon.common.reactive.Single)8 ByteBuffer (java.nio.ByteBuffer)8 MediaType (io.helidon.common.http.MediaType)7 MessageBodyWriterContext (io.helidon.media.common.MessageBodyWriterContext)7 WebClient (io.helidon.webclient.WebClient)7 WebClientResponse (io.helidon.webclient.WebClientResponse)7 IOException (java.io.IOException)7 List (java.util.List)7 Optional (java.util.Optional)7 TimeUnit (java.util.concurrent.TimeUnit)7 MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat)7 Logger (java.util.logging.Logger)6