Use of io.helidon.common.reactive.Multi in project helidon by oracle.
The class ServerHealthCheckIT, method testHttpHealth.
/**
* Read and check DB Client health status from Helidon Web Server.
*
* @throws InterruptedException if the current thread was interrupted
* @throws IOException if an I/O error occurs when sending or receiving HTTP request
*/
@Test
public void testHttpHealth() throws IOException, InterruptedException {
// Call select-pokemons to warm up server
Multi<DbRow> rows = DB_CLIENT.execute(exec -> exec.namedQuery("select-pokemons"));
List<DbRow> pokemonList = rows.collectList().await();
// Read and process health check response
String response = get(URL + "/health");
LOGGER.info("RESPONSE: " + response);
JsonStructure jsonResponse = null;
try (JsonReader jr = Json.createReader(new StringReader(response))) {
jsonResponse = jr.read();
} catch (JsonParsingException | IllegalStateException ex) {
fail(String.format("Error parsing response: %s", ex.getMessage()));
}
JsonArray checks = jsonResponse.asJsonObject().getJsonArray("checks");
assertThat(checks.size(), greaterThan(0));
// Verify that every check reports a name and an UP state/status
checks.forEach(check -> {
JsonObject checkObject = check.asJsonObject();
String name = checkObject.getString("name");
assertThat(name, notNullValue());
assertThat(checkObject.getString("state"), equalTo("UP"));
assertThat(checkObject.getString("status"), equalTo("UP"));
});
}
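The get(...) helper used above is not part of this snippet. A minimal sketch of what it could look like, using java.net.http from the JDK; the helper name and its throws clause come from the test, everything in the body is an assumption:

import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical helper: issue a blocking GET and return the body as a String.
private static String get(String url) throws IOException, InterruptedException {
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create(url))
            .GET()
            .build();
    return client.send(request, HttpResponse.BodyHandlers.ofString()).body();
}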
Use of io.helidon.common.reactive.Multi in project helidon by oracle.
The class RetryTest, method testMultiLastDelay.
@Test
public void testMultiLastDelay() throws InterruptedException {
List<Long> lastDelayCalls = new ArrayList<>();
Retry retry = Retry.builder().retryPolicy((firstCallMillis, lastDelay, call) -> {
lastDelayCalls.add(lastDelay);
return Optional.of(lastDelay + 1);
}).build();
AtomicInteger count = new AtomicInteger();
TestSubscriber ts = new TestSubscriber();
Multi<Integer> multi = retry.invokeMulti(() -> {
if (count.getAndIncrement() < 3) {
return Multi.error(new RetryException());
} else {
return Multi.just(0, 1, 2);
}
});
multi.subscribe(ts);
ts.request(2);
ts.cdl.await(1, TimeUnit.SECONDS);
assertThat("Last delay should increase", lastDelayCalls, contains(0L, 1L, 2L));
}
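TestSubscriber is a test-local helper that is not shown here. A minimal sketch consistent with the calls made above (ts.request(2), ts.cdl.await(...)); the request method and the cdl field name come from the test, the rest is an assumption:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Flow;

// Hypothetical subscriber: buffers received items, exposes request(n),
// and releases the latch (cdl) on any terminal signal.
static final class TestSubscriber implements Flow.Subscriber<Integer> {
    final CountDownLatch cdl = new CountDownLatch(1);
    final List<Integer> items = new ArrayList<>();
    private Flow.Subscription subscription;

    @Override
    public void onSubscribe(Flow.Subscription subscription) {
        this.subscription = subscription;
    }

    void request(long n) {
        subscription.request(n);
    }

    @Override
    public void onNext(Integer item) {
        items.add(item);
    }

    @Override
    public void onError(Throwable throwable) {
        cdl.countDown();
    }

    @Override
    public void onComplete() {
        cdl.countDown();
    }
}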
Use of io.helidon.common.reactive.Multi in project helidon by oracle.
The class ServerMetricsCheckIT, method testHttpMetrics.
/**
* Read and check DB Client metrics from Helidon Web Server.
*
* @throws InterruptedException if the current thread was interrupted
* @throws IOException if an I/O error occurs when sending or receiving HTTP request
*/
@Test
public void testHttpMetrics() throws IOException, InterruptedException {
// Call select-pokemons to increment its counter
Multi<DbRow> rows = DB_CLIENT.execute(exec -> exec.namedQuery("select-pokemons"));
List<DbRow> pokemonList = rows.collectList().await();
// Call insert-pokemon to increment its counter and update its timer
Pokemon pokemon = new Pokemon(BASE_ID + 1, "Lickitung", TYPES.get(1));
Long result = DB_CLIENT.execute(exec -> exec.namedInsert("insert-pokemon", pokemon.getId(), pokemon.getName())).await();
// Read and process metrics response
String response = get(URL + "/metrics/application");
LOGGER.info("RESPONSE: " + response);
JsonObject application = null;
try (JsonReader jr = Json.createReader(new StringReader(response))) {
application = jr.readObject();
} catch (JsonParsingException | IllegalStateException ex) {
fail(String.format("Error parsing response: %s", ex.getMessage()));
}
assertThat(application, notNullValue());
assertThat(application.getValueType(), equalTo(JsonValue.ValueType.OBJECT));
assertThat(application.size(), greaterThan(0));
assertThat(application.containsKey("db.counter.select-pokemons"), equalTo(true));
assertThat(application.containsKey("db.counter.insert-pokemon"), equalTo(true));
int selectPokemons = application.getInt("db.counter.select-pokemons");
int insertPokemons = application.getInt("db.counter.insert-pokemon");
assertThat(selectPokemons, equalTo(1));
assertThat(insertPokemons, equalTo(1));
assertThat(application.containsKey("db.timer.insert-pokemon"), equalTo(true));
JsonObject insertTimer = application.getJsonObject("db.timer.insert-pokemon");
assertThat(insertTimer.containsKey("count"), equalTo(true));
assertThat(insertTimer.containsKey("min"), equalTo(true));
assertThat(insertTimer.containsKey("max"), equalTo(true));
int timerCount = insertTimer.getInt("count");
assertThat(timerCount, equalTo(1));
}
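The db.counter.* and db.timer.* names asserted above imply that DB_CLIENT was built with DB client metrics services registered for those statements. A hedged sketch of such wiring, based on Helidon 2.x's DbClientMetrics helpers; the statement names come from the test, while the builder composition and the config key are assumptions about the project's actual setup:

import io.helidon.dbclient.DbClient;
import io.helidon.dbclient.metrics.DbClientMetrics;

// Hypothetical DB client setup: count both statements, time the insert.
DbClient dbClient = DbClient.builder()
        .config(config.get("db")) // 'config' is assumed to be loaded elsewhere
        .addService(DbClientMetrics.counter()
                .statementNames("select-pokemons", "insert-pokemon"))
        .addService(DbClientMetrics.timer()
                .statementNames("insert-pokemon"))
        .build();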
Use of io.helidon.common.reactive.Multi in project helidon by oracle.
The class HelidonReactiveStreamsEngine, method build.
@SuppressWarnings({ "unchecked", "rawtypes" })
static Object build(Iterable<Stage> graph, Mode mode) throws UnsupportedStageException {
Flow.Subscriber graphInlet = null;
Multi result = null;
CompletionStage completion = null;
Iterator<Stage> stages = graph.iterator();
Stage stage = null;
if (stages.hasNext()) {
stage = stages.next();
}
// we'll patch in an identity processor here
if (mode == Mode.PROCESSOR || mode == Mode.SUBSCRIBER) {
if (stage == null || !((stage instanceof Stage.ProcessorStage) || (stage instanceof Stage.Coupled))) {
DeferredProcessor processor = new DeferredProcessor<>();
graphInlet = processor;
result = processor;
}
}
if (stage != null) {
boolean once = false;
for (; ; ) {
if (once) {
if (!stages.hasNext()) {
break;
}
stage = stages.next();
}
once = true;
if (stage instanceof Stage.PublisherStage) {
requireNullSource(result, stage);
Publisher publisher = ((Stage.PublisherStage) stage).getRsPublisher();
result = Multi.create(FlowAdapters.toFlowPublisher(publisher));
continue;
}
if (stage instanceof Stage.Of) {
requireNullSource(result, stage);
Iterable iterable = ((Stage.Of) stage).getElements();
result = Multi.create(iterable);
continue;
}
if (stage instanceof Stage.ProcessorStage) {
if (result == null) {
requireNullFront(graphInlet, stage);
// act as a source
Processor processor = ((Stage.ProcessorStage) stage).getRsProcessor();
graphInlet = FlowAdapters.toFlowSubscriber(processor);
result = Multi.create(FlowAdapters.toFlowPublisher(processor));
} else {
// act as a middle operator
Processor processor = ((Stage.ProcessorStage) stage).getRsProcessor();
// FIXME should this be deferred for when the downstream actually subscribes?
result = new DeferredViaProcessor(result, FlowAdapters.toFlowProcessor(processor));
}
continue;
}
if (stage instanceof Stage.Failed) {
requireNullSource(result, stage);
Throwable throwable = ((Stage.Failed) stage).getError();
result = Multi.error(throwable);
continue;
}
if (stage instanceof Stage.Concat) {
requireNullSource(result, stage);
Graph g1 = ((Stage.Concat) stage).getFirst();
Graph g2 = ((Stage.Concat) stage).getSecond();
result = Multi.concat(
FlowAdapters.toFlowPublisher((Publisher) build(g1.getStages(), Mode.PUBLISHER)),
FlowAdapters.toFlowPublisher((Publisher) build(g2.getStages(), Mode.PUBLISHER)));
continue;
}
if (stage instanceof Stage.FromCompletionStage) {
requireNullSource(result, stage);
CompletionStage cs = ((Stage.FromCompletionStage) stage).getCompletionStage();
result = Multi.create(cs);
continue;
}
if (stage instanceof Stage.FromCompletionStageNullable) {
requireNullSource(result, stage);
CompletionStage cs = ((Stage.FromCompletionStageNullable) stage).getCompletionStage();
result = Multi.create(cs, true);
continue;
}
if (stage instanceof Stage.Coupled) {
Stage.Coupled coupled = (Stage.Coupled) stage;
if (result == null) {
requireNullFront(graphInlet, stage);
}
Flow.Subscriber s = FlowAdapters.toFlowSubscriber(
((SubscriberWithCompletionStage) build(coupled.getSubscriber().getStages(), Mode.SUBSCRIBER))
.getSubscriber());
Multi f = Multi.create(FlowAdapters.toFlowPublisher(
(Publisher) build(coupled.getPublisher().getStages(), Mode.PUBLISHER)));
Flow.Processor processor = coupledBuildProcessor(s, f);
if (result == null) {
graphInlet = processor;
result = Multi.create(processor);
} else {
result = new DeferredViaProcessor(result, processor);
}
continue;
}
if (stage instanceof Stage.Map) {
requireSource(result, stage);
Function mapper = ((Stage.Map) stage).getMapper();
result = result.map(mapper::apply);
continue;
}
if (stage instanceof Stage.Peek) {
requireSource(result, stage);
Consumer consumer = ((Stage.Peek) stage).getConsumer();
result = result.peek(consumer);
continue;
}
if (stage instanceof Stage.Filter) {
requireSource(result, stage);
Predicate predicate = ((Stage.Filter) stage).getPredicate();
result = result.filter(predicate);
continue;
}
if (stage instanceof Stage.DropWhile) {
requireSource(result, stage);
Predicate predicate = ((Stage.DropWhile) stage).getPredicate();
result = result.dropWhile(predicate);
continue;
}
if (stage instanceof Stage.Skip) {
requireSource(result, stage);
long n = ((Stage.Skip) stage).getSkip();
result = result.skip(n);
continue;
}
if (stage instanceof Stage.Limit) {
requireSource(result, stage);
long n = ((Stage.Limit) stage).getLimit();
result = result.limit(n);
continue;
}
if (stage instanceof Stage.Distinct) {
requireSource(result, stage);
result = result.distinct();
continue;
}
if (stage instanceof Stage.TakeWhile) {
requireSource(result, stage);
Predicate predicate = ((Stage.TakeWhile) stage).getPredicate();
result = result.takeWhile(predicate);
continue;
}
if (stage instanceof Stage.FlatMap) {
requireSource(result, stage);
Function mapper = ((Stage.FlatMap) stage).getMapper();
// FIXME dedicated concatMap
result = result.flatMap(
v -> new MultiNullGuard<>(FlowAdapters.toFlowPublisher(
(Publisher) build(((Graph) mapper.apply(v)).getStages(), Mode.PUBLISHER))),
1, false, Flow.defaultBufferSize());
continue;
}
if (stage instanceof Stage.FlatMapCompletionStage) {
requireSource(result, stage);
Function mapper = ((Stage.FlatMapCompletionStage) stage).getMapper();
// FIXME dedicated concatMap
result = result.flatMap(v -> Multi.create((CompletionStage) mapper.apply(v)), 1, false, 1);
continue;
}
if (stage instanceof Stage.FlatMapIterable) {
requireSource(result, stage);
Function mapper = ((Stage.FlatMapIterable) stage).getMapper();
result = result.flatMapIterable(mapper);
continue;
}
if (stage instanceof Stage.OnError) {
requireSource(result, stage);
Consumer consumer = ((Stage.OnError) stage).getConsumer();
result = result.onError(consumer);
continue;
}
if (stage instanceof Stage.OnTerminate) {
requireSource(result, stage);
Runnable runnable = ((Stage.OnTerminate) stage).getAction();
result = result.onTerminate(runnable);
continue;
}
if (stage instanceof Stage.OnComplete) {
requireSource(result, stage);
Runnable runnable = ((Stage.OnComplete) stage).getAction();
result = result.onComplete(runnable);
continue;
}
if (stage instanceof Stage.OnErrorResume) {
requireSource(result, stage);
Function mapper = ((Stage.OnErrorResume) stage).getFunction();
result = result.onErrorResume(mapper);
continue;
}
if (stage instanceof Stage.OnErrorResumeWith) {
requireSource(result, stage);
Function mapper = ((Stage.OnErrorResumeWith) stage).getFunction();
result = result.onErrorResumeWith(e -> {
Graph g = (Graph) mapper.apply(e);
return FlowAdapters.toFlowPublisher((Publisher) build(g.getStages(), Mode.PUBLISHER));
});
continue;
}
if (stage instanceof Stage.FindFirst) {
if (mode == Mode.SUBSCRIBER || mode == Mode.COMPLETION) {
if (graphInlet != null) {
requireSource(result, stage);
requireNullTerminal(completion, stage);
}
BasicFindFirstSubscriber cs = new BasicFindFirstSubscriber();
completion = cs.completable();
if (result != null) {
result.subscribe(cs);
} else {
graphInlet = cs;
}
continue;
}
throw new IllegalArgumentException("Stage.FindFirst is only supported when building via buildSubscriber or buildCompletion");
}
if (stage instanceof Stage.SubscriberStage) {
if (mode == Mode.SUBSCRIBER || mode == Mode.COMPLETION) {
if (graphInlet != null) {
requireSource(result, stage);
requireNullTerminal(completion, stage);
}
Subscriber s = ((Stage.SubscriberStage) stage).getRsSubscriber();
BasicCompletionSubscriber cs = new BasicCompletionSubscriber(FlowAdapters.toFlowSubscriber(s));
completion = cs.completable();
if (result != null) {
result.subscribe(cs);
} else {
graphInlet = cs;
}
continue;
}
throw new IllegalArgumentException("Stage.FindFirst is only supported when building via buildSubscriber or buildCompletion");
}
if (stage instanceof Stage.Collect) {
if (mode == Mode.SUBSCRIBER || mode == Mode.COMPLETION) {
if (graphInlet != null) {
requireSource(result, stage);
requireNullTerminal(completion, stage);
}
Stage.Collect collect = (Stage.Collect) stage;
BasicCollectSubscriber cs = new BasicCollectSubscriber(collect.getCollector());
completion = cs.completable();
if (result != null) {
result.subscribe(cs);
} else {
graphInlet = cs;
}
continue;
}
throw new IllegalArgumentException("Stage.FindFirst is only supported when building via buildSubscriber or buildCompletion");
}
if (stage instanceof Stage.Cancel) {
if (mode == Mode.SUBSCRIBER || mode == Mode.COMPLETION) {
if (graphInlet != null) {
requireSource(result, stage);
requireNullTerminal(completion, stage);
}
BasicCancelSubscriber cs = new BasicCancelSubscriber();
completion = cs.completable();
if (result != null) {
result.subscribe(cs);
} else {
graphInlet = cs;
}
continue;
}
throw new IllegalArgumentException("Stage.FindFirst is only supported when building via buildSubscriber or buildCompletion");
}
throw new UnsupportedStageException(stage);
}
}
if (mode == Mode.PUBLISHER) {
if (result == null) {
throw new IllegalArgumentException("The graph had no usable stages for building a Publisher.");
}
return FlowAdapters.toPublisher(result);
}
if (mode == Mode.PROCESSOR) {
if (graphInlet == null || result == null) {
throw new IllegalArgumentException("The graph had no usable stages for building a Processor.");
}
return FlowAdapters.toProcessor(new BridgeProcessor(graphInlet, result));
}
if (mode == Mode.COMPLETION) {
if (completion == null) {
throw new IllegalArgumentException("The graph had no usable stages for building a CompletionStage.");
}
return completion;
}
if (graphInlet == null || completion == null) {
throw new IllegalArgumentException("The graph had no usable stages for building a Subscriber.");
}
return new InnerSubscriberWithCompletionStage(graphInlet, completion);
}
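A short usage sketch showing how this build(...) method is reached in practice through the MicroProfile Reactive Streams Operators API. The ReactiveStreams builder calls are standard API; the no-argument buildRs() discovers the Helidon engine via ServiceLoader rather than instantiating it directly:

import org.eclipse.microprofile.reactive.streams.operators.ReactiveStreams;
import org.reactivestreams.Publisher;

// Each operator below becomes one Stage in the graph handled above:
// Stage.Of -> Stage.Map -> Stage.Filter, built in Mode.PUBLISHER.
Publisher<Integer> publisher = ReactiveStreams.of(1, 2, 3, 4)
        .map(i -> i * 10)
        .filter(i -> i > 10)
        .buildRs();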
Use of io.helidon.common.reactive.Multi in project helidon by oracle.
The class BackpressureTest, method overloadEventLoop.
/**
* Attempts to overload the webserver subscriber with a higher data flow than Netty's NioEventLoop can
* send in a single iteration. An incomplete write leaves the rest of the ByteBuffer to be written by the next
* event loop iteration.
* <p>
* This can overflow Netty's buffer or, in the case of a single-threaded unbounded request, prevent the event loop
* from ever reaching the next iteration.
* <p>
* An incomplete write is not flushed and its ChannelFuture's listener isn't executed, leaving the DataChunk NOT released.
* That should lead to an OutOfMemoryError or an assertion error in the sampled DataChunk batch,
* depending on the JVM memory settings.
*
* @param multi publisher providing an endless stream of high-volume (preferably more than 2 MB, but not less than 1264 kB) data chunks
*/
void overloadEventLoop(Multi<DataChunk> multi) {
AtomicBoolean firstChunk = new AtomicBoolean(true);
AtomicBoolean shuttingDown = new AtomicBoolean(false);
AtomicReference<Optional<Throwable>> serverUpstreamError = new AtomicReference<>(Optional.empty());
List<DataChunk> firstBatch = new ArrayList<>(5);
// Kill the server publisher when the client is done
Multi<DataChunk> dataChunkMulti = multi.takeWhile(ch -> !shuttingDown.get())
.peek(chunk -> {
if (firstChunk.getAndSet(false)) {
// skip first chunk, it gets released on complete
return;
}
// Keep references to chunks 2-6
if (firstBatch.size() < 5) {
firstBatch.add(chunk);
}
}).onError(Throwable::printStackTrace).onError(t -> serverUpstreamError.set(Optional.of(t)));
AtomicLong byteCnt = new AtomicLong();
LazyValue<Boolean> validateOnce = LazyValue.create(() -> {
Collection<DataChunk> snapshot = Collections.unmodifiableCollection(firstBatch);
LOGGER.info("======== DataChunk sample batch ========");
IntStream.range(0, snapshot.size()).forEach(i -> LOGGER.info("Chunk #" + (i + 2) + " released: " + firstBatch.get(i).isReleased()));
boolean result = firstBatch.stream().allMatch(DataChunk::isReleased);
// clean up
firstBatch.forEach(DataChunk::release);
return result;
});
WebServer webServer = null;
try {
webServer = WebServer.builder()
.host("localhost")
.routing(Routing.builder()
.get("/", (req, res) -> res.send(dataChunkMulti))
.build())
.build()
.start()
.await(TIMEOUT_SEC, TimeUnit.SECONDS);
WebClient.builder()
.baseUri("http://localhost:" + webServer.port())
.build()
.get()
.path("/")
.request()
.peek(res -> assertThat(res.status().reasonPhrase(), res.status().code(), is(200)))
.flatMap(WebClientResponse::content)
.takeWhile(ws -> byteCnt.get() < (300 * 1024 * 1024))
.forEach(chunk -> {
long actCnt = byteCnt.addAndGet(chunk.bytes().length);
if (actCnt % (100 * 1024 * 1024) == 0) {
LOGGER.info("Client received " + (actCnt / (1024 * 1024)) + "MB");
}
if (actCnt > (200 * 1024 * 1024)) {
// After 200 MB, check that the first 5 chunks have been released
// but keep the pressure and don't kill the stream
assertThat("Not all chunks from the first batch are released!", validateOnce.get());
}
chunk.release();
}).onTerminate(() -> shuttingDown.set(true)).await(TIMEOUT_SEC, TimeUnit.SECONDS);
} finally {
if (webServer != null) {
webServer.shutdown().await(TIMEOUT_SEC, TimeUnit.SECONDS);
}
}
serverUpstreamError.get().ifPresent(Assertions::fail);
}
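A sketch of a publisher that satisfies the multi parameter described in the javadoc: an endless stream of 2 MB chunks. Multi.range and DataChunk.create are existing Helidon 2.x APIs; the chunk size and the reuse of a single backing array are choices made for this illustration only:

// Endless high-volume source: Integer.MAX_VALUE chunks of 2 MB each,
// all sharing one payload array (the contents are irrelevant to the test).
byte[] payload = new byte[2 * 1024 * 1024];
Multi<DataChunk> endless = Multi.range(0, Integer.MAX_VALUE)
        .map(i -> DataChunk.create(payload));
overloadEventLoop(endless);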