Use of io.helidon.common.http.DataChunk in the Helidon project by Oracle:
the MultiPartDecoder class, releaseChunks method.
/**
 * Releases every buffered chunk and removes it from the id-keyed cache in a single pass.
 */
private void releaseChunks() {
    // removeIf on the values view drops the backing map entry after the chunk is freed,
    // which is equivalent to the explicit iterator release-then-remove loop.
    chunksByIds.values().removeIf(chunk -> {
        chunk.release();
        return true;
    });
}
Use of io.helidon.common.http.DataChunk in the Helidon project by Oracle:
the ReadableByteChannelPublisher class, publishSingleOrFinish method.
/**
 * Publishes a single item, completes the subscriber, or both. If the next item is not yet available but may become
 * available in the future, this method returns {@code false} and the call will be rescheduled based on
 * {@link RetrySchema}.
 *
 * @param subscr a subscriber to publish on
 * @return {@code true} if the next item was published or the subscriber was completed, otherwise {@code false}
 * @throws Exception if any error happens and {@code onError()} must be called on the subscriber
 */
private boolean publishSingleOrFinish(Flow.Subscriber<? super DataChunk> subscr) throws Exception {
    // Reuse the chunk left over from a previous attempt that read no data, or allocate a fresh one.
    DataChunk chunk;
    if (currentChunk == null) {
        chunk = allocateNewChunk();
    } else {
        chunk = currentChunk;
        currentChunk = null;
    }
    ByteBuffer bb = chunk.data()[0];
    int count = 0;
    // Fill the buffer from the channel until it is full, the channel has no data right now (read returns 0),
    // or end of stream is reached (read returns -1).
    while (bb.remaining() > 0) {
        count = channel.read(bb);
        if (count <= 0) {
            break;
        }
    }
    // Send or store: if any bytes were ever written into the buffer, flip and publish it;
    // otherwise keep the still-empty chunk for the next (rescheduled) attempt.
    if (bb.capacity() > bb.remaining()) {
        bb.flip();
        subscr.onNext(chunk);
    } else {
        currentChunk = chunk;
    }
    // Last or not: a negative count means end of stream — close the channel and complete the subscriber.
    if (count < 0) {
        try {
            channel.close();
        } catch (Exception e) {
            // Best-effort close after the channel is fully read; failure is logged, not propagated.
            LOGGER.log(Level.WARNING, "Cannot close readable byte channel! (Close attempt after fully read channel.)", e);
        }
        tryComplete();
        // Release a stored chunk that was never published (nothing was read into it at EOF).
        if (currentChunk != null) {
            currentChunk.release();
        }
        return true;
    } else {
        return count > 0;
    }
}
Use of io.helidon.common.http.DataChunk in the Helidon project by Oracle:
the JsonpBodyStreamWriter class, write method.
@Override
public Multi<DataChunk> write(Publisher<? extends JsonStructure> publisher, GenericType<? extends JsonStructure> type, MessageBodyWriterContext context) {
    // Negotiate a JSON-compatible response content type, defaulting to application/json.
    context.contentType(context.findAccepted(MediaType.JSON_PREDICATE, MediaType.APPLICATION_JSON));
    // we do not have join operator
    AtomicBoolean first = new AtomicBoolean(true);
    JsonStructureToChunks jsonToChunks = new JsonStructureToChunks(true, jsonWriterFactory, context.charset());
    return Multi.create(publisher)
            .map(jsonToChunks)
            .flatMap(chunk ->
                    // The first record is prefixed with the opening array bracket, every later one with a comma.
                    Multi.just(DataChunk.create(first.getAndSet(false) ? ARRAY_JSON_BEGIN_BYTES : COMMA_BYTES), chunk))
            // Emit the closing array bracket once the upstream completes.
            .onCompleteResume(DataChunk.create(ARRAY_JSON_END_BYTES));
}
Use of io.helidon.common.http.DataChunk in the Helidon project by Oracle:
the JsonbNdBodyStreamWriter class, write method.
@Override
public Multi<DataChunk> write(Flow.Publisher<?> publisher, GenericType<?> type, MessageBodyWriterContext context) {
    // NDJSON output: one JSON document per line.
    context.contentType(MediaType.APPLICATION_X_NDJSON);
    AtomicBoolean first = new AtomicBoolean(true);
    return Multi.create(publisher)
            .map(entity -> DataChunk.create(jsonb.toJson(entity).getBytes(StandardCharsets.UTF_8)))
            .flatMap(chunk -> {
                // Every record after the first is preceded by a newline separator.
                if (!first.compareAndSet(true, false)) {
                    return Multi.just(DataChunk.create(NL), chunk);
                }
                return Single.just(chunk);
            });
}
Use of io.helidon.common.http.DataChunk in the Helidon project by Oracle:
the ObjectStorageService class, upload method.
/**
 * Uploads the request payload to object storage under the {@code file-name} path parameter.
 *
 * @param req server request carrying the payload and headers
 * @param res server response used to report the result
 */
private void upload(ServerRequest req, ServerResponse res) {
    OptionalLong length = req.headers().contentLength();
    if (length.isEmpty()) {
        // Drain and free the payload before rejecting, then report the missing header.
        req.content().forEach(DataChunk::release);
        res.status(Http.Status.BAD_REQUEST_400).send("Content length must be defined");
        return;
    }
    String name = req.path().param("file-name");
    PutObject.Request putRequest = PutObject.Request.builder()
            .objectName(name)
            .bucket(bucketName)
            .contentLength(length.getAsLong());
    // Propagate the incoming media type when the client supplied one.
    req.headers().contentType().ifPresent(putRequest::requestMediaType);
    // Stream the request entity straight into object storage; reply with the service request id,
    // or send the failure to the response on error.
    objectStorage.putObject(putRequest, req.content())
            .forSingle(ociResponse -> res.send(ociResponse.requestId()))
            .exceptionally(res::send);
}
Aggregations