Use of io.helidon.common.http.DataChunk in project helidon by oracle.
The class DataChunkInputStream, method read:
@Override
public int read(byte[] buf, int off, int len) throws IOException {
    if (subscribed.compareAndSet(false, true)) {
        // subscribe for the first time
        originalPublisher.subscribe(new DataChunkSubscriber());
    }
    if (current == null) {
        throw new IOException("The input stream has been closed");
    }
    try {
        // block until data is available
        DataChunk chunk = current.get();
        if (chunk == null) {
            return -1;
        }
        ByteBuffer[] currentBuffers = chunk.data();
        int count = 0;
        while (bufferIndex < currentBuffers.length) {
            if (bufferIndex == 0 && currentBuffers[bufferIndex].position() == 0) {
                LOGGER.finest(() -> "Reading chunk ID: " + chunk.id());
            }
            // copy at most the caller's remaining capacity
            int rem = currentBuffers[bufferIndex].remaining();
            int blen = len;
            if (blen > rem) {
                blen = rem;
            }
            currentBuffers[bufferIndex].get(buf, off, blen);
            off += blen;
            count += blen;
            len -= blen;
            if (rem > blen) {
                // caller's buffer is full; current buffer still has data
                break;
            }
            // current buffer fully consumed
            if (bufferIndex == currentBuffers.length - 1) {
                // last buffer of the chunk: release it and request the next chunk
                releaseChunk(chunk, null);
                current = next;
                bufferIndex = 0;
                subscription.request(1);
                break;
            }
            bufferIndex++;
        }
        return count;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    }
}
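For orientation, here is a minimal sketch of driving this stream from user code: wrap a reactive Flow.Publisher<DataChunk> and read it with plain blocking InputStream calls. It assumes the Helidon 2.x layout, where DataChunkInputStream lives in io.helidon.media.common; adjust the imports to your version.

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Flow;

import io.helidon.common.http.DataChunk;
import io.helidon.common.reactive.Multi;
import io.helidon.media.common.DataChunkInputStream;

public class ReadChunksExample {
    public static void main(String[] args) throws Exception {
        // Two chunks published reactively; read(byte[], int, int) drains them
        // one ByteBuffer at a time, blocking until each chunk arrives.
        Flow.Publisher<DataChunk> publisher = Multi.just(
                DataChunk.create("hello ".getBytes(StandardCharsets.UTF_8)),
                DataChunk.create("world".getBytes(StandardCharsets.UTF_8)));

        try (InputStream in = new DataChunkInputStream(publisher)) {
            byte[] buf = new byte[4];
            int n;
            while ((n = in.read(buf, 0, buf.length)) != -1) {
                System.out.print(new String(buf, 0, n, StandardCharsets.UTF_8));
            }
        }
    }
}

Note how the small destination buffer forces several read calls per chunk: the method copies at most len bytes and only requests the next chunk once the current one is fully consumed and released.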
Use of io.helidon.common.http.DataChunk in project helidon by oracle.
The class BareResponseImpl, method writeStatusAndHeaders:
@Override
public void writeStatusAndHeaders(Http.ResponseStatus status, Map<String, List<String>> headers) {
    Objects.requireNonNull(status, "Parameter 'status' was null!");
    if (!statusHeadersSent.compareAndSet(false, true)) {
        throw new IllegalStateException("Status and headers were already sent");
    }
    HttpResponseStatus nettyStatus;
    if (status instanceof Http.Status || status.reasonPhrase() == null) {
        // default reason phrase
        nettyStatus = valueOf(status.code());
    } else {
        // custom reason phrase
        nettyStatus = valueOf(status.code(), status.reasonPhrase());
    }
    response = new DefaultHttpResponse(HTTP_1_1, nettyStatus);
    for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
        response.headers().add(headerEntry.getKey(), headerEntry.getValue());
    }
    // Copy HTTP/2 headers to response for correlation (streamId)
    requestHeaders.names().stream()
            .filter(header -> header.startsWith(HTTP_2_HEADER_PREFIX))
            .forEach(header -> response.headers().add(header, requestHeaders.get(header)));
    // Check if WebSocket upgrade
    boolean isUpgrade = isWebSocketUpgrade(status, headers);
    if (isUpgrade) {
        isWebSocketUpgrade = true;
    } else {
        // Set chunked if length not set; may switch to length later
        boolean lengthSet = HttpUtil.isContentLengthSet(response);
        if (!lengthSet) {
            lengthOptimization = status.code() == Http.Status.OK_200.code()
                    && !HttpUtil.isTransferEncodingChunked(response)
                    && !isSseEventStream(headers);
            HttpUtil.setTransferEncodingChunked(response, true);
        }
    }
    // if the response Connection header is set explicitly to close, we can ignore the following
    if (!keepAlive || HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(response.headers().get(HttpHeaderNames.CONNECTION))) {
        response.headers().remove(HttpHeaderNames.CONNECTION);
        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
    } else {
        if (!requestContext.requestCompleted()) {
            LOGGER.finer(() -> log("Request content not fully read with keep-alive: true", channel));
            if (!isWebSocketUpgrade) {
                if (requestContext.isDataRequested()) {
                    // there are pending requests, we have emitted some data, and the request was not
                    // explicitly canceled; this is a bug in user code, where the entity is requested
                    // and not fully processed; throwing an exception here would be a breaking change
                    // (also this may be an intermittent problem, as it may depend on a thread race)
                    HttpRequest request = requestContext.request();
                    LOGGER.warning("Entity was requested and not fully consumed before a response is sent. "
                            + "This is not supported. Connection will be closed. Please fix your route for "
                            + request.method() + " " + request.uri());
                    // close this connection, as it is in an unexpected state
                    response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
                    originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
                } else {
                    // we want to consume the entity and keep the connection alive;
                    // the entity must be consumed here, so the forwarding handler does not close the
                    // connection because of unconsumed payload (the following code will only succeed
                    // if there is no subscriber)
                    requestContext.publisher().forEach(DataChunk::release).onComplete(() -> {
                        response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
                        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE_ON_FAILURE);
                    }).onError(t -> {
                        response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
                        originalEntityAnalyzed.complete(ChannelFutureListener.CLOSE);
                    }).ignoreElement();
                }
            }
        } else if (!headers.containsKey(HttpHeaderNames.CONNECTION.toString())) {
            response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
        }
    }
    // Content length optimization attempt
    if (!lengthOptimization) {
        requestEntityAnalyzed = requestEntityAnalyzed.thenApply(listener -> {
            LOGGER.fine(() -> log("Writing headers %s", status));
            requestContext.runInScope(() -> orderedWrite(this::initWriteResponse));
            return listener;
        });
    }
}
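An illustrative sketch of the calling side, assuming the BareResponse SPI interface that BareResponseImpl implements (the import path io.helidon.webserver.spi.BareResponse is an assumption; the implementation class itself is internal to the webserver):

import java.util.List;
import java.util.Map;

import io.helidon.common.http.Http;
import io.helidon.webserver.spi.BareResponse; // assumed path of the SPI

void sendHeaders(BareResponse bareResponse) {
    // First (and only permitted) call: with no Content-Length set and status
    // 200, the implementation above switches to Transfer-Encoding: chunked.
    bareResponse.writeStatusAndHeaders(Http.Status.OK_200,
            Map.of("Content-Type", List.of("text/plain")));

    // A custom reason phrase would take the valueOf(code, reasonPhrase) branch:
    // Http.ResponseStatus.create(418, "I'm a teapot")

    // Any second call throws IllegalStateException, because statusHeadersSent
    // was already flipped by compareAndSet(false, true).
}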
Use of io.helidon.common.http.DataChunk in project helidon by oracle.
The class MediaPublisher, method create:
/**
 * Creates a publisher of a single {@link String string}.
 *
 * @param publishedType a media type; if it contains a charset, that charset is used, otherwise {@code UTF-8}.
 *                      If {@code null}, {@code text/plain} is used as a default.
 * @param charSequence  a sequence to publish.
 * @return a new publisher.
 */
static MediaPublisher create(MediaType publishedType, CharSequence charSequence) {
    ByteBuffer data = Optional.ofNullable(publishedType)
            .flatMap(MediaType::charset)
            .map(Charset::forName)
            .orElse(StandardCharsets.UTF_8)
            .encode(charSequence.toString());
    Flow.Publisher<DataChunk> publisher = Multi.singleton(DataChunk.create(data));
    return new MediaPublisher() {

        @Override
        public MediaType mediaType() {
            return publishedType;
        }

        @Override
        public void subscribe(Flow.Subscriber<? super DataChunk> subscriber) {
            publisher.subscribe(subscriber);
        }
    };
}
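A short usage sketch: publish a string as text/plain and drain the resulting chunks. Multi, DataChunk, and MediaType are regular Helidon 2.x APIs; MediaPublisher itself is a test utility of the webserver module, so its import is omitted here.

import java.nio.charset.StandardCharsets;

import io.helidon.common.http.DataChunk;
import io.helidon.common.http.MediaType;
import io.helidon.common.reactive.Multi;

// text/plain carries no charset parameter, so the string is encoded as UTF-8.
MediaPublisher body = MediaPublisher.create(MediaType.TEXT_PLAIN, "hello");

// Drain the single chunk and print its content.
Multi.create(body)
        .map(chunk -> new String(chunk.bytes(), StandardCharsets.UTF_8))
        .forEach(System.out::print); // prints "hello"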