Use of io.helidon.common.http.DataChunk in project helidon by oracle.
From the class MultiPartEncoderTest, method testPartContentPublisherError.
@Test
public void testPartContentPublisherError() {
    MultiPartEncoder encoder = MultiPartEncoder.create("boundary", MEDIA_CONTEXT.writerContext());
    DataChunkSubscriber subscriber = new DataChunkSubscriber();
    encoder.subscribe(subscriber);
    // the single part's content publisher fails immediately with IllegalStateException
    Multi.just(WriteableBodyPart.builder()
                    .publisher(Multi.<DataChunk>error(new IllegalStateException("oops")))
                    .build())
            .subscribe(encoder);
    CompletableFuture<String> future = subscriber.content().toCompletableFuture();
    assertThat(future.isCompletedExceptionally(), is(equalTo(true)));
    try {
        future.getNow(null);
        fail("exception should be thrown");
    } catch (CompletionException ex) {
        // the part error is propagated as the cause of the downstream failure
        assertThat(ex.getCause(), is(instanceOf(IllegalStateException.class)));
        assertThat(ex.getCause().getMessage(), is(equalTo("oops")));
    }
}
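For contrast, a minimal happy-path sketch, not taken from the test class: the part content "hello", the 10-second timeout, and the extra java.nio.charset and Hamcrest imports are assumptions. It encodes one part and collects the emitted DataChunks into a String.
MultiPartEncoder encoder = MultiPartEncoder.create("boundary", MEDIA_CONTEXT.writerContext());
Multi.just(WriteableBodyPart.builder().entity("hello").build()).subscribe(encoder);
// collect every encoded chunk and join the decoded pieces into one body
String body = String.join("", Multi.create(encoder)
        .map(chunk -> new String(chunk.bytes(), StandardCharsets.UTF_8))
        .collectList()
        .await(10, TimeUnit.SECONDS));
// the multipart delimiter derived from the boundary should appear in the encoded output
assertThat(body, containsString("--boundary"));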
Use of io.helidon.common.http.DataChunk in project helidon by oracle.
From the class MultiPartEncoderTest, method testRequests.
@Test
public void testRequests() throws Exception {
    MultiPartEncoder enc = MultiPartEncoder.create("boundary", MEDIA_CONTEXT.writerContext());
    Multi.create(LongStream.range(1, 500)
                    .mapToObj(i -> WriteableBodyPart.builder().entity("part" + i).build()))
            .subscribe(enc);
    final CountDownLatch latch = new CountDownLatch(3);
    Subscriber<DataChunk> subscriber = new Subscriber<DataChunk>() {
        @Override
        public void onSubscribe(final Flow.Subscription subscription) {
            subscription.request(3L);
        }

        @Override
        public void onNext(final DataChunk item) {
            latch.countDown();
        }

        @Override
        public void onComplete() {
        }

        @Override
        public void onError(Throwable throwable) {
        }
    };
    enc.subscribe(subscriber);
    waitOnLatch(latch);
}
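As an alternative to the hand-written subscriber above, a sketch (same test-class setup assumed, names illustrative) of a subscriber that requests one chunk at a time, releasing each chunk before asking for the next, so the encoder only produces output as demand arrives.
Subscriber<DataChunk> oneAtATime = new Subscriber<DataChunk>() {
    private Flow.Subscription subscription;

    @Override
    public void onSubscribe(final Flow.Subscription subscription) {
        this.subscription = subscription;
        subscription.request(1L);   // prime the stream with a single chunk
    }

    @Override
    public void onNext(final DataChunk item) {
        item.release();             // free the chunk's buffer once consumed
        subscription.request(1L);   // only then ask for the next one
    }

    @Override
    public void onComplete() {
    }

    @Override
    public void onError(final Throwable throwable) {
    }
};
enc.subscribe(oneAtATime);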
Use of io.helidon.common.http.DataChunk in project helidon by oracle.
From the class ConnectionCloseTest, method testCutConnection.
@Test
void testCutConnection() throws ExecutionException, InterruptedException, TimeoutException {
    WebClient webClient = createNewClient();
    CompletableFuture<Throwable> actualErrorCf = new CompletableFuture<>();
    // Expecting WebClientException: Connection reset by the host
    webClient.get()
            .path("/connectionClose")
            .request()
            .flatMap(WebClientResponse::content)
            .map(DataChunk::bytes)
            .map(String::new)
            .log()
            .onError(actualErrorCf::complete)
            .ignoreElements();
    Throwable actual = actualErrorCf.get(10, TimeUnit.SECONDS);
    assertThat(actual, Matchers.instanceOf(WebClientException.class));
}
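For comparison, a sketch of the happy path through the same DataChunk pipeline; the "/greet" route and the 10-second timeout are assumptions, not part of ConnectionCloseTest.
WebClient webClient = createNewClient();
String body = String.join("",
        webClient.get()
                .path("/greet")                       // hypothetical route that returns normally
                .request()
                .flatMap(WebClientResponse::content)  // stream the entity as DataChunks
                .map(DataChunk::bytes)
                .map(String::new)
                .collectList()
                .await(10, TimeUnit.SECONDS));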
Use of io.helidon.common.http.DataChunk in project helidon by oracle.
From the class MockZipkinService, method mockZipkin.
private void mockZipkin(final ServerRequest request, final ServerResponse response) {
    request.queryParams().all("serviceName").forEach(s -> System.out.println(">>>" + s));
    request.content()
            .registerReader(new MessageBodyStreamReader<JsonValue>() {
                @Override
                public PredicateResult accept(final GenericType<?> type, final MessageBodyReaderContext context) {
                    return PredicateResult.COMPATIBLE;
                }

                @Override
                @SuppressWarnings("unchecked")
                public <U extends JsonValue> Flow.Publisher<U> read(final Flow.Publisher<DataChunk> publisher,
                        final GenericType<U> type, final MessageBodyReaderContext context) {
                    // concatenate all chunks into one buffer, gunzip it and parse the JSON array
                    return (Flow.Publisher<U>) Multi.create(publisher)
                            .map(d -> ByteBuffer.wrap(d.bytes()))
                            .reduce((buf, buf2) -> ByteBuffer.allocate(buf.capacity() + buf2.capacity())
                                    .put(buf.array()).put(buf2.array()))
                            .flatMap(b -> {
                                try (ByteArrayInputStream bais = new ByteArrayInputStream(b.array());
                                        GZIPInputStream gzipInputStream = new GZIPInputStream(bais)) {
                                    return Single.just(Json.createReader(new StringReader(
                                            new String(gzipInputStream.readAllBytes()))).readArray());
                                } catch (EOFException e) {
                                    // ignore truncated gzip input
                                    return Multi.empty();
                                } catch (IOException e) {
                                    throw new RuntimeException(e);
                                }
                            })
                            .flatMap(a -> Multi.create(a.stream()));
                }
            })
            .asStream(JsonValue.class)
            .map(JsonValue::asJsonObject)
            .filter(json -> TAGS_POINTER.containsValue(json)
                    && COMPONENT_POINTER.containsValue(json)
                    && filteredComponents.stream()
                            .anyMatch(s -> s.equals(((JsonString) COMPONENT_POINTER.getValue(json)).getString())))
            .onError(Throwable::printStackTrace)
            .onError(t -> response.status(500).send(t))
            .onComplete(response::send)
            .peek(json -> LOGGER.info(json.toString()))
            .forEach(e -> next.getAndSet(new CompletableFuture<>()).complete(e));
}
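The same registerReader/asStream pattern in a smaller, hypothetical form: a stream reader that decodes each DataChunk as a UTF-8 String, with no gzip or JSON handling; the println consumer and the String target type are illustrative only.
request.content()
        .registerReader(new MessageBodyStreamReader<String>() {
            @Override
            public PredicateResult accept(final GenericType<?> type, final MessageBodyReaderContext context) {
                return PredicateResult.COMPATIBLE;
            }

            @Override
            @SuppressWarnings("unchecked")
            public <U extends String> Flow.Publisher<U> read(final Flow.Publisher<DataChunk> publisher,
                    final GenericType<U> type, final MessageBodyReaderContext context) {
                // decode each chunk independently as UTF-8 text
                return (Flow.Publisher<U>) Multi.create(publisher)
                        .map(chunk -> new String(chunk.bytes(), StandardCharsets.UTF_8));
            }
        })
        .asStream(String.class)
        .forEach(piece -> System.out.println(">>> " + piece));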
Use of io.helidon.common.http.DataChunk in project helidon by oracle.
From the class KeepAliveTest, method setUp.
@BeforeAll
static void setUp() {
    LogConfig.configureRuntime();
    server = WebServer.builder()
            .routing(Routing.builder()
                    .register("/close", rules -> rules.any((req, res) -> {
                        req.content().forEach(dataChunk -> {
                            // consume only the first of the two chunks, then fail
                            dataChunk.release();
                            throw new RuntimeException("BOOM!");
                        }).exceptionally(res::send);
                    }))
                    .register("/plain", rules -> rules.any((req, res) -> {
                        req.content().forEach(DataChunk::release).onComplete(res::send).ignoreElement();
                    }))
                    .build())
            .build();
    server.start().await();
    String serverUrl = "http://localhost:" + server.port();
    webClient = WebClient.builder()
            .baseUri(serverUrl)
            .keepAlive(true)
            .build();
}
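A hypothetical follow-up test using the client built in setUp(): it issues two sequential requests to "/plain" and only checks that both succeed over the keep-alive client; it does not by itself prove the socket was reused. The method name and the 10-second timeout are assumptions.
@Test
void testSequentialRequestsOnPlain() {
    for (int i = 0; i < 2; i++) {
        // "/plain" drains the request content and always replies with 200
        WebClientResponse response = webClient.get()
                .path("/plain")
                .request()
                .await(10, TimeUnit.SECONDS);
        assertThat(response.status().code(), is(200));
        response.close();   // release the response so the connection can be reused
    }
}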