Use of reactor.core.publisher.FluxProcessor in project spring-framework by spring-projects.
Class ReactiveAdapterRegistryTests, method getAdapterForReactiveSubType:
@Test
public void getAdapterForReactiveSubType() throws Exception {
    ReactiveAdapter adapter1 = getAdapter(Flux.class);
    ReactiveAdapter adapter2 = getAdapter(FluxProcessor.class);
    // FluxProcessor extends Flux, so without a dedicated registration both resolve to the same adapter
    assertSame(adapter1, adapter2);

    this.registry.registerReactiveType(
            ReactiveTypeDescriptor.multiValue(FluxProcessor.class, FluxProcessor::empty),
            o -> (FluxProcessor<?, ?>) o,
            FluxProcessor::from);

    // Once FluxProcessor has its own descriptor, the registry returns a distinct adapter for it
    ReactiveAdapter adapter3 = getAdapter(FluxProcessor.class);
    assertNotNull(adapter3);
    assertNotSame(adapter1, adapter3);
}
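For context, a minimal sketch of how an adapter obtained from such a registry is typically consumed. The registry instance and values below are illustrative, not part of the Spring test class, and imports are omitted as in the excerpts above.

ReactiveAdapterRegistry registry = new ReactiveAdapterRegistry();
// Without a dedicated registration, FluxProcessor resolves to the generic Flux adapter,
// because FluxProcessor extends Flux -- which is what the first assertSame above verifies.
ReactiveAdapter fluxAdapter = registry.getAdapter(Flux.class);
// Framework code converts to and from the reactive type through the adapter.
Publisher<Integer> publisher = fluxAdapter.toPublisher(Flux.just(1, 2, 3));
Object backToFlux = fluxAdapter.fromPublisher(Flux.just(4, 5, 6));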
Use of reactor.core.publisher.FluxProcessor in project reactor-netty by reactor.
Class WebsocketTest, method duplexEcho:
@Test
public void duplexEcho() throws Exception {
    int c = 10;
    CountDownLatch clientLatch = new CountDownLatch(c);
    CountDownLatch serverLatch = new CountDownLatch(c);

    // Serialized ReplayProcessors bridge the websocket frames into the latches on each side
    FluxProcessor<String, String> server = ReplayProcessor.<String>create().serialize();
    FluxProcessor<String, String> client = ReplayProcessor.<String>create().serialize();
    server.log("server").subscribe(v -> serverLatch.countDown());
    client.log("client").subscribe(v -> clientLatch.countDown());

    httpServer = HttpServer.create(0)
                           .newHandler((in, out) -> out.sendWebsocket((i, o) ->
                                   o.sendString(i.receive().asString().take(c).subscribeWith(server))))
                           .block(Duration.ofSeconds(30));

    Flux.interval(Duration.ofMillis(200)).map(Object::toString).subscribe(client::onNext);

    HttpClient.create(httpServer.address().getPort())
              .ws("/test")
              .flatMap(in -> in.receiveWebsocket((i, o) ->
                      o.options(opt -> opt.flushOnEach())
                       .sendString(i.receive().asString().subscribeWith(client))))
              .subscribe();

    Assert.assertTrue(serverLatch.await(10, TimeUnit.SECONDS));
    Assert.assertTrue(clientLatch.await(10, TimeUnit.SECONDS));
}
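As a side note, a minimal standalone sketch of the bridging pattern this test relies on, assuming the reactor-core 3.x processor API used above (imports omitted as in the excerpts): a serialized ReplayProcessor accepts onNext calls from imperative code and replays the signals to its subscribers, while subscribeWith both subscribes the processor to an upstream Flux and returns it, which is why the test can pass it straight into sendString in a single expression.

FluxProcessor<String, String> bridge = ReplayProcessor.<String>create().serialize();
bridge.log("bridge").subscribe(System.out::println);   // downstream side, as with the latches above
Flux.just("a", "b", "c").subscribe(bridge::onNext);     // imperative push, as done with client::onNext
Flux.just("d", "e").subscribeWith(bridge);              // subscribeWith returns the processor it subscribed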
Use of reactor.core.publisher.FluxProcessor in project reactor-core by reactor.
Class FluxTests, method shouldNotFlushStreamOnTimeoutPrematurely:
/**
 * original from @oiavorskyl https://github.com/eventBus/eventBus/issues/358
 * @throws Exception for convenience
 */
@Test
public void shouldNotFlushStreamOnTimeoutPrematurely() throws Exception {
    final int NUM_MESSAGES = 100000;
    final int BATCH_SIZE = 1000;
    final int TIMEOUT = 100;
    final int PARALLEL_STREAMS = 2;
    /*
     * Relative tolerance, defaulting to 90% of the batches: in a live environment random factors,
     * e.g. a GC pause when the system is under pressure, can affect the flux latency.
     */
    final double TOLERANCE = 0.9;

    FluxProcessor<Integer, Integer> batchingStreamDef = EmitterProcessor.create();
    List<Integer> testDataset = createTestDataset(NUM_MESSAGES);
    final CountDownLatch latch = new CountDownLatch(NUM_MESSAGES);
    Map<Integer, Integer> batchesDistribution = new ConcurrentHashMap<>();

    // Buffer each parallel rail by size or by timeout and record how many batches of each size were seen
    batchingStreamDef.publishOn(asyncGroup)
                     .parallel(PARALLEL_STREAMS)
                     .groups()
                     .subscribe(substream -> substream.hide()
                                                      .publishOn(asyncGroup)
                                                      .bufferTimeout(BATCH_SIZE, Duration.ofMillis(TIMEOUT))
                                                      .subscribe(items -> {
                                                          batchesDistribution.compute(items.size(),
                                                                  (key, value) -> value == null ? 1 : value + 1);
                                                          items.forEach(item -> latch.countDown());
                                                      }));

    testDataset.forEach(batchingStreamDef::onNext);
    System.out.println(batchesDistribution);

    if (!latch.await(10, TimeUnit.SECONDS)) {
        throw new RuntimeException(latch.getCount() + " messages not received");
    }

    int messagesProcessed = batchesDistribution.entrySet()
                                               .stream()
                                               .mapToInt(entry -> entry.getKey() * entry.getValue())
                                               .reduce(Integer::sum)
                                               .getAsInt();

    assertEquals(NUM_MESSAGES, messagesProcessed);
    // At least 90% of the batches should be full BATCH_SIZE buffers
    assertTrue("Less than 90% (" + NUM_MESSAGES / BATCH_SIZE * TOLERANCE
                    + ") of the batches are matching the buffer size: " + batchesDistribution.get(BATCH_SIZE),
            batchesDistribution.get(BATCH_SIZE) >= NUM_MESSAGES / BATCH_SIZE * TOLERANCE);
}
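For context, a minimal standalone sketch of the bufferTimeout behaviour this test exercises (the values are arbitrary, not from FluxTests; imports omitted as in the excerpts): a buffer is emitted when it either reaches the size bound or the timeout elapses, whichever comes first, which is why a slow stretch can produce smaller-than-BATCH_SIZE batches within the tolerance above.

Flux.range(1, 10)
    .delayElements(Duration.ofMillis(30))         // slow producer: roughly 3 elements per 100 ms window
    .bufferTimeout(4, Duration.ofMillis(100))     // close a buffer at 4 elements or after 100 ms
    .doOnNext(batch -> System.out.println("batch of " + batch.size() + ": " + batch))
    .blockLast();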