Use of io.helidon.common.LazyValue in the oracle/helidon project: class BackpressureTest, method overloadEventLoop.
/**
* Attempts to overload the webserver subscriber with a higher data flow than Netty's NioEventLoop can
* send in a single iteration. Causing an incomplete write leaves the rest of the ByteBuffer to be
* written by the next event loop iteration.
* <p>
* This can overflow Netty's buffer or, in the case of a single-threaded unbounded request, prevent the
* event loop from ever reaching the next iteration.
* <p>
* An incomplete write is not flushed and its ChannelFuture's listener isn't executed, leaving the
* DataChunk NOT released. That should lead to an OutOfMemoryError or an assertion error on the sample
* DataChunk batch, depending on the JVM memory settings.
*
* @param multi publisher providing an endless stream of high-volume data chunks (preferably more
*              than 2 MB, but not less than 1264 kB)
*/
void overloadEventLoop(Multi<DataChunk> multi) {
AtomicBoolean firstChunk = new AtomicBoolean(true);
AtomicBoolean shuttingDown = new AtomicBoolean(false);
AtomicReference<Optional<Throwable>> serverUpstreamError = new AtomicReference<>(Optional.empty());
List<DataChunk> firstBatch = new ArrayList<>(5);
// Kill the server publisher when the client is done
Multi<DataChunk> dataChunkMulti = multi.takeWhile(ch -> !shuttingDown.get())
        .peek(chunk -> {
if (firstChunk.getAndSet(false)) {
// skip first chunk, it gets released on complete
return;
}
// Keep references to chunks #2 through #6
if (firstBatch.size() < 5) {
firstBatch.add(chunk);
}
})
        .onError(Throwable::printStackTrace)
        .onError(t -> serverUpstreamError.set(Optional.of(t)));
AtomicLong byteCnt = new AtomicLong();
LazyValue<Boolean> validateOnce = LazyValue.create(() -> {
Collection<DataChunk> snapshot = Collections.unmodifiableCollection(firstBatch);
LOGGER.info("======== DataChunk sample batch ========");
IntStream.range(0, snapshot.size())
        .forEach(i -> LOGGER.info("Chunk #" + (i + 2) + " released: " + firstBatch.get(i).isReleased()));
boolean result = firstBatch.stream().allMatch(DataChunk::isReleased);
// clean up
firstBatch.forEach(DataChunk::release);
return result;
});
WebServer webServer = null;
try {
webServer = WebServer.builder()
        .host("localhost")
        .routing(Routing.builder()
                .get("/", (req, res) -> res.send(dataChunkMulti))
                .build())
        .build()
        .start()
        .await(TIMEOUT_SEC, TimeUnit.SECONDS);
WebClient.builder()
        .baseUri("http://localhost:" + webServer.port())
        .build()
        .get()
        .path("/")
        .request()
        .peek(res -> assertThat(res.status().reasonPhrase(), res.status().code(), is(200)))
        .flatMap(WebClientResponse::content)
        // Stop requesting once the client has consumed 300 MB
        .takeWhile(ws -> byteCnt.get() < (300 * 1024 * 1024))
        .forEach(chunk -> {
long actCnt = byteCnt.addAndGet(chunk.bytes().length);
if (actCnt % (100 * 1024 * 1024) == 0) {
LOGGER.info("Client received " + (actCnt / (1024 * 1024)) + "MB");
}
if (actCnt > (200 * 1024 * 1024)) {
// After 200 MB, check whether the first 5 chunks have been released
// but keep the pressure and don't kill the stream
assertThat("Not all chunks from the first batch are released!", validateOnce.get());
}
chunk.release();
})
        .onTerminate(() -> shuttingDown.set(true))
        .await(TIMEOUT_SEC, TimeUnit.SECONDS);
} finally {
if (webServer != null) {
webServer.shutdown().await(TIMEOUT_SEC, TimeUnit.SECONDS);
}
}
serverUpstreamError.get().ifPresent(Assertions::fail);
}
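The validateOnce field above works because io.helidon.common.LazyValue memoizes its supplier: the first get() runs the validation and caches the result, so the assertion executed for every chunk past 200 MB inspects the sample batch exactly once. A minimal standalone sketch of that behavior (the class name, counter, and printouts are illustrative, not part of the test):

import io.helidon.common.LazyValue;

import java.util.concurrent.atomic.AtomicInteger;

public class LazyValueSketch {
    public static void main(String[] args) {
        AtomicInteger supplierRuns = new AtomicInteger();

        // The supplier is invoked at most once, on the first get();
        // every subsequent get() returns the cached result.
        LazyValue<Boolean> validateOnce = LazyValue.create(() -> {
            supplierRuns.incrementAndGet();
            return true; // stand-in for the real batch validation
        });

        for (int i = 0; i < 3; i++) {
            System.out.println("get() -> " + validateOnce.get());
        }
        // Prints 1: the validation ran only once despite three get() calls
        System.out.println("supplier ran " + supplierRuns.get() + " time(s)");
    }
}

The leak mechanism described in the Javadoc follows Netty's listener contract: a chunk may only be released once its write's ChannelFuture completes, so an unflushed write keeps it alive. A hedged sketch of that pattern using Netty's EmbeddedChannel (this is an assumption-level illustration, not code from Helidon):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;

public class WriteListenerSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel();
        ByteBuf buf = Unpooled.wrappedBuffer(new byte[]{1, 2, 3});

        // The listener fires only once the write completes; if a write is
        // never flushed, the listener never runs and the buffer is never freed.
        channel.writeAndFlush(buf)
                .addListener(future -> System.out.println("write complete: " + future.isSuccess()));

        channel.finishAndReleaseAll();
    }
}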