Example usage of reactor.core.publisher.EmitterProcessor from the reactor-core project (by reactor):
class FluxTests, method parallelTest.
/**
 * Pushes {@code iterations} integers through an {@link EmitterProcessor} pipeline and
 * measures throughput. The pipeline shape depends on {@code dispatcher}:
 * "partitioned" splits the stream across 2 parallel rails (each on asyncGroup);
 * any other value runs a single publishOn/map/scan chain.
 *
 * @param dispatcher pipeline variant selector ("partitioned" or anything else for the default chain)
 * @param iterations number of events to emit; the test latch must reach zero within 15s
 * @throws InterruptedException if the await is interrupted
 */
private void parallelTest(String dispatcher, int iterations) throws InterruptedException {
    System.out.println("Dispatcher: " + dispatcher);
    System.out.println("..........: " + iterations);
    CountDownLatch latch = new CountDownLatch(iterations);
    EmitterProcessor<Integer> deferred;
    switch (dispatcher) {
        case "partitioned":
            deferred = EmitterProcessor.create();
            // Two parallel rails; each rail hops to asyncGroup, scans a running sum,
            // and counts the latch down once per emitted value.
            deferred.publishOn(asyncGroup)
                    .parallel(2)
                    .groups()
                    .subscribe(stream -> stream.publishOn(asyncGroup)
                                               .map(i -> i)
                                               .scan(1, (acc, next) -> acc + next)
                                               .subscribe(i -> latch.countDown()));
            break;
        default:
            deferred = EmitterProcessor.create();
            deferred.publishOn(asyncGroup)
                    .map(i -> i)
                    .scan(1, (acc, next) -> acc + next)
                    .subscribe(i -> latch.countDown());
            break;
    }
    int[] data = new int[iterations];
    for (int i = 0; i < iterations; i++) {
        data[i] = i;
    }
    long start = System.currentTimeMillis();
    for (int i : data) {
        deferred.onNext(i);
    }
    if (!latch.await(15, TimeUnit.SECONDS)) {
        // Report how many events actually made it through before the timeout.
        throw new RuntimeException("Count:" + (iterations - latch.getCount()) + " ");
    }
    long stop = System.currentTimeMillis() - start;
    // Guard against a zero elapsed time so the throughput division below cannot divide by zero.
    stop = stop > 0 ? stop : 1;
    System.out.println("Time spent: " + stop + "ms");
    System.out.println("ev/ms: " + iterations / stop);
    // Multiply before dividing (in long arithmetic) so ev/s is not truncated
    // to ev/ms * 1000 by integer division, and cannot overflow int.
    System.out.println("ev/s: " + iterations * 1000L / stop);
    System.out.println("");
    assertEquals(0, latch.getCount());
}
Example usage of reactor.core.publisher.EmitterProcessor from the reactor-core project (by reactor):
class FluxTests, method multiplexUsingDispatchersAndSplit.
/**
 * Exercises a fork/join topology built from EmitterProcessors and dedicated Schedulers:
 * <pre>
 *                  forkStream
 *                  /        \      &lt; - - - int
 *                 v          v
 * persistenceStream          computationStream
 *                  \        /      &lt; - - - List&lt;String&gt;
 *                   v      v
 *                  joinStream      &lt; - - - String
 *                  splitStream
 *            observedSplitStream
 * </pre>
 * The fork stream is subscribed by both intermediate processors; their outputs are
 * zipped back together, flattened, and collected for the final assertion.
 * @throws Exception for convenience
 */
@Test(timeout = TIMEOUT)
public void multiplexUsingDispatchersAndSplit() throws Exception {
final EmitterProcessor<Integer> forkEmitterProcessor = EmitterProcessor.create();
// create(false): presumably disables autoCancel so the processor keeps running when a
// downstream subscriber cancels — TODO confirm against EmitterProcessor.create(boolean) javadoc.
final EmitterProcessor<Integer> computationEmitterProcessor = EmitterProcessor.create(false);
// One single-threaded scheduler per pipeline stage, plus a 2-thread pool for fork/join.
Scheduler computation = Schedulers.newSingle("computation");
Scheduler persistence = Schedulers.newSingle("persistence");
Scheduler forkJoin = Schedulers.newParallel("forkJoin", 2);
// For input i, produce the list ["i0", "i1", ..., "i(i-1)"] on the computation scheduler.
final Flux<List<String>> computationStream = computationEmitterProcessor.publishOn(computation).map(i -> {
final List<String> list = new ArrayList<>(i);
for (int j = 0; j < i; j++) {
list.add("i" + j);
}
return list;
}).doOnNext(ls -> println("Computed: ", ls)).log("computation");
final EmitterProcessor<Integer> persistenceEmitterProcessor = EmitterProcessor.create(false);
// For input i, produce the single-element list ["done" + i] on the persistence scheduler.
final Flux<List<String>> persistenceStream = persistenceEmitterProcessor.publishOn(persistence).doOnNext(i -> println("Persisted: ", i)).map(i -> Collections.singletonList("done" + i)).log("persistence");
Flux<Integer> forkStream = forkEmitterProcessor.publishOn(forkJoin).log("fork");
// Fan out: both intermediate processors consume the same fork stream.
forkStream.subscribe(computationEmitterProcessor);
forkStream.subscribe(persistenceEmitterProcessor);
// Zip pairs the i-th computation result with the i-th persistence result, then merges
// the two lists. NOTE(review): Arrays.asList(a, b).get(0) is the computation list, which
// must be mutable for addAll — it is, since it comes from new ArrayList<>(i) above.
final Flux<List<String>> joinStream = Flux.zip(computationStream, persistenceStream, (a, b) -> Arrays.asList(a, b)).publishOn(forkJoin).map(listOfLists -> {
listOfLists.get(0).addAll(listOfLists.get(1));
return listOfLists.get(0);
}).log("join");
final Semaphore doneSemaphore = new Semaphore(0);
// Flatten the joined lists into one stream of Strings and collect them all into a promise.
final MonoProcessor<List<String>> listPromise = joinStream.flatMap(Flux::fromIterable).log("resultStream").collectList().doOnTerminate(doneSemaphore::release).toProcessor();
listPromise.subscribe();
// Drive the pipeline: three events, then complete so collectList can terminate.
forkEmitterProcessor.onNext(1);
forkEmitterProcessor.onNext(2);
forkEmitterProcessor.onNext(3);
forkEmitterProcessor.onComplete();
List<String> res = listPromise.block(Duration.ofSeconds(5));
// Expected interleaving: ["i0".. "i(n-1)"] followed by "done" + n, for n = 1, 2, 3.
assertEquals(Arrays.asList("i0", "done1", "i0", "i1", "done2", "i0", "i1", "i2", "done3"), res);
// NOTE(review): disposal is not in a finally block, so a failed assertion leaks the
// schedulers — acceptable in a test, but worth knowing.
forkJoin.dispose();
persistence.dispose();
computation.dispose();
}
Example usage of reactor.core.publisher.EmitterProcessor from the reactor-core project (by reactor):
class FluxTests, method parallelMapManyTest.
/**
 * Pushes {@code iterations} integers through a flatMap-style EmitterProcessor pipeline
 * and measures throughput. The pipeline shape depends on {@code dispatcher}:
 * "partitioned" fans out over 4 parallel rails (each hopping to asyncGroup);
 * "sync" runs flatMap directly on the processor; anything else hops to asyncGroup first.
 *
 * @param dispatcher pipeline variant selector ("partitioned", "sync", or default async)
 * @param iterations number of events to emit; the latch must reach zero within 20s
 * @throws InterruptedException if the await is interrupted
 */
private void parallelMapManyTest(String dispatcher, int iterations) throws InterruptedException {
    System.out.println("MM Dispatcher: " + dispatcher);
    System.out.println("..........: " + iterations);
    CountDownLatch latch = new CountDownLatch(iterations);
    EmitterProcessor<Integer> mapManydeferred;
    switch (dispatcher) {
        case "partitioned":
            mapManydeferred = EmitterProcessor.create();
            // Four parallel rails; each rail hops to asyncGroup and counts down per value.
            mapManydeferred.parallel(4)
                           .groups()
                           .subscribe(substream -> substream.publishOn(asyncGroup)
                                                            .subscribe(i -> latch.countDown()));
            break;
        default:
            mapManydeferred = EmitterProcessor.create();
            // "sync" subscribes on the emitting thread; otherwise hop to asyncGroup first.
            ("sync".equals(dispatcher) ? mapManydeferred : mapManydeferred.publishOn(asyncGroup))
                    .flatMap(Flux::just)
                    .subscribe(i -> latch.countDown());
            break;
    }
    int[] data = new int[iterations];
    for (int i = 0; i < iterations; i++) {
        data[i] = i;
    }
    long start = System.currentTimeMillis();
    for (int i : data) {
        mapManydeferred.onNext(i);
    }
    if (!latch.await(20, TimeUnit.SECONDS)) {
        throw new RuntimeException(latch.getCount() + "");
    } else {
        System.out.println(latch.getCount());
    }
    assertEquals(0, latch.getCount());
    long stop = System.currentTimeMillis() - start;
    // Guard against a zero elapsed time so the throughput division below cannot divide by zero.
    stop = stop > 0 ? stop : 1;
    System.out.println("MM Dispatcher: " + dispatcher);
    System.out.println("Time spent: " + stop + "ms");
    System.out.println("ev/ms: " + iterations / stop);
    // Multiply before dividing (in long arithmetic) so ev/s is not truncated
    // to ev/ms * 1000 by integer division, and cannot overflow int.
    System.out.println("ev/s: " + iterations * 1000L / stop);
    System.out.println("");
}
Aggregations