Use of reactor.core.scheduler.Scheduler in project reactor-core by reactor.
From the class FluxSpecTests, method whenProcessorIsStreamed.
@Test
public void whenProcessorIsStreamed() {
    // "When a processor is streamed"
    // given: "a source composable and an async downstream"
    ReplayProcessor<Integer> source = ReplayProcessor.create();
    Scheduler scheduler = Schedulers.newParallel("test", 2);
    try {
        Mono<List<Integer>> res = source.subscribeOn(scheduler)
                                        .delaySubscription(Duration.ofMillis(1L))
                                        .log("streamed")
                                        .map(it -> it * 2)
                                        .buffer()
                                        .publishNext();
        res.subscribe();

        // when: "the source accepts a value"
        source.onNext(1);
        source.onNext(2);
        source.onNext(3);
        source.onNext(4);
        source.onComplete();

        // then: "the res is passed on"
        assertThat(res.block()).containsExactly(2, 4, 6, 8);
    }
    finally {
        scheduler.dispose();
    }
}
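The pattern worth noting here is the Scheduler lifecycle: Schedulers.newParallel creates a dedicated thread pool, subscribeOn moves the subscription onto it, and dispose in a finally block releases its threads. Below is a minimal standalone sketch of that pattern, assuming only reactor-core on the classpath; the class and variable names are illustrative and not part of the test suite.

import java.time.Duration;
import java.util.List;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class SubscribeOnSketch {

    public static void main(String[] args) {
        Scheduler scheduler = Schedulers.newParallel("sketch", 2);
        try {
            Mono<List<Integer>> doubled = Flux.just(1, 2, 3, 4)
                                              .subscribeOn(scheduler) // subscription runs on "sketch" threads
                                              .map(i -> i * 2)
                                              .collectList();
            System.out.println(doubled.block(Duration.ofSeconds(5))); // [2, 4, 6, 8]
        }
        finally {
            scheduler.dispose(); // always release the Scheduler's threads
        }
    }
}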
Use of reactor.core.scheduler.Scheduler in project reactor-core by reactor.
From the class FluxTests, method multiplexUsingDispatchersAndSplit.
/**
 * <pre>
 *                    forkStream
 *                   /          \        < - - - int
 *                  v            v
 *   persistenceStream          computationStream
 *                   \          /        < - - - List< String >
 *                    v        v
 *                     joinStream        < - - - String
 *                         |
 *                         v
 *                    splitStream
 *                         |
 *                         v
 *                observedSplitStream
 * </pre>
 *
 * @throws Exception for convenience
 */
@Test(timeout = TIMEOUT)
public void multiplexUsingDispatchersAndSplit() throws Exception {
    final EmitterProcessor<Integer> forkEmitterProcessor = EmitterProcessor.create();
    final EmitterProcessor<Integer> computationEmitterProcessor = EmitterProcessor.create(false);

    Scheduler computation = Schedulers.newSingle("computation");
    Scheduler persistence = Schedulers.newSingle("persistence");
    Scheduler forkJoin = Schedulers.newParallel("forkJoin", 2);

    final Flux<List<String>> computationStream = computationEmitterProcessor
            .publishOn(computation)
            .map(i -> {
                final List<String> list = new ArrayList<>(i);
                for (int j = 0; j < i; j++) {
                    list.add("i" + j);
                }
                return list;
            })
            .doOnNext(ls -> println("Computed: ", ls))
            .log("computation");

    final EmitterProcessor<Integer> persistenceEmitterProcessor = EmitterProcessor.create(false);
    final Flux<List<String>> persistenceStream = persistenceEmitterProcessor
            .publishOn(persistence)
            .doOnNext(i -> println("Persisted: ", i))
            .map(i -> Collections.singletonList("done" + i))
            .log("persistence");

    Flux<Integer> forkStream = forkEmitterProcessor.publishOn(forkJoin).log("fork");
    forkStream.subscribe(computationEmitterProcessor);
    forkStream.subscribe(persistenceEmitterProcessor);

    final Flux<List<String>> joinStream = Flux.zip(computationStream, persistenceStream, (a, b) -> Arrays.asList(a, b))
            .publishOn(forkJoin)
            .map(listOfLists -> {
                listOfLists.get(0).addAll(listOfLists.get(1));
                return listOfLists.get(0);
            })
            .log("join");

    final Semaphore doneSemaphore = new Semaphore(0);
    final MonoProcessor<List<String>> listPromise = joinStream
            .flatMap(Flux::fromIterable)
            .log("resultStream")
            .collectList()
            .doOnTerminate(doneSemaphore::release)
            .toProcessor();
    listPromise.subscribe();

    forkEmitterProcessor.onNext(1);
    forkEmitterProcessor.onNext(2);
    forkEmitterProcessor.onNext(3);
    forkEmitterProcessor.onComplete();

    List<String> res = listPromise.block(Duration.ofSeconds(5));
    assertEquals(Arrays.asList("i0", "done1", "i0", "i1", "done2", "i0", "i1", "i2", "done3"), res);

    forkJoin.dispose();
    persistence.dispose();
    computation.dispose();
}
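The fork/join shape in this test relies on publishOn to move each branch onto its own named Scheduler and on Flux.zip to join the branch results. Below is a minimal standalone sketch of the same idea, assuming only reactor-core on the classpath; it substitutes a cold Flux.just source for the hot EmitterProcessor so the sketch stays self-contained, and all names are illustrative.

import java.util.List;

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class ForkJoinSketch {

    public static void main(String[] args) {
        Scheduler computation = Schedulers.newSingle("computation");
        Scheduler persistence = Schedulers.newSingle("persistence");
        try {
            Flux<Integer> source = Flux.just(1, 2, 3); // cold source: each branch re-subscribes

            Flux<String> computed  = source.publishOn(computation).map(i -> "computed-" + i);
            Flux<String> persisted = source.publishOn(persistence).map(i -> "persisted-" + i);

            // join the two branches pairwise
            List<String> joined = Flux.zip(computed, persisted, (a, b) -> a + "|" + b)
                                      .collectList()
                                      .block();
            System.out.println(joined);
        }
        finally {
            persistence.dispose();
            computation.dispose();
        }
    }
}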
Use of reactor.core.scheduler.Scheduler in project reactor-core by reactor.
From the class WorkQueueProcessorTest, method highRate.
@Test
public void highRate() throws Exception {
    WorkQueueProcessor<String> queueProcessor = WorkQueueProcessor.<String>builder()
            .share(true)
            .name("Processor")
            .bufferSize(256)
            .waitStrategy(liteBlocking())
            .build();
    Scheduler timer = Schedulers.newSingle("Timer");
    // note: 's', 'logger' and 'submitInCurrentThread' are referenced here but defined outside this excerpt
    queueProcessor.bufferTimeout(32, Duration.ofMillis(2), timer)
                  .subscribe(new CoreSubscriber<List<String>>() {

                      int counter;

                      @Override
                      public void onComplete() {
                          System.out.println("Consumed in total: " + counter);
                      }

                      @Override
                      public void onError(Throwable t) {
                          t.printStackTrace();
                      }

                      @Override
                      public void onNext(List<String> strings) {
                          int size = strings.size();
                          counter += size;
                          if (strings.contains(s)) {
                              synchronized (s) {
                                  // logger.debug("Synchronizer!");
                                  s.notifyAll();
                              }
                          }
                      }

                      @Override
                      public void onSubscribe(Subscription s) {
                          s.request(Long.MAX_VALUE);
                      }
                  });
    FluxSink<String> emitter = queueProcessor.sink();
    try {
        submitInCurrentThread(emitter);
    }
    finally {
        logger.debug("Finishing");
        emitter.complete();
        timer.dispose();
    }
    TimeUnit.SECONDS.sleep(1);
}
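Besides running user work, a Scheduler can act as the time source of time-based operators; here the single-threaded "Timer" Scheduler drives bufferTimeout. Below is a minimal standalone sketch of that usage, assuming only reactor-core on the classpath, with illustrative names.

import java.time.Duration;

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class BufferTimeoutSketch {

    public static void main(String[] args) {
        Scheduler timer = Schedulers.newSingle("timer");
        try {
            Flux.range(1, 100)
                .bufferTimeout(32, Duration.ofMillis(2), timer) // buffers close on size OR elapsed time, timed on "timer"
                .doOnNext(batch -> System.out.println("batch of " + batch.size()))
                .blockLast(Duration.ofSeconds(5));
        }
        finally {
            timer.dispose();
        }
    }
}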
Use of reactor.core.scheduler.Scheduler in project reactor-core by reactor.
From the class ParallelFluxTest, method parallelModeFused.
@Test
public void parallelModeFused() {
    Flux<Integer> source = Flux.range(1, 1_000_000);
    int ncpu = Math.max(8, Runtime.getRuntime().availableProcessors());
    for (int i = 1; i < ncpu + 1; i++) {
        Scheduler scheduler = Schedulers.newParallel("test", i);
        try {
            Flux<Integer> result = ParallelFlux.from(source, i)
                                               .runOn(scheduler)
                                               .map(v -> v + 1)
                                               .sequential();
            AssertSubscriber<Integer> ts = AssertSubscriber.create();
            result.subscribe(ts);
            ts.await(Duration.ofSeconds(10));
            ts.assertSubscribed()
              .assertValueCount(1_000_000)
              .assertComplete()
              .assertNoError();
        }
        finally {
            scheduler.dispose();
        }
    }
}
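The core recipe exercised above is: split a Flux into rails with ParallelFlux.from, give each rail a worker with runOn(Scheduler), then merge back with sequential(). Below is a minimal standalone sketch of that recipe, assuming only reactor-core on the classpath, with illustrative names.

import reactor.core.publisher.Flux;
import reactor.core.publisher.ParallelFlux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class RunOnSketch {

    public static void main(String[] args) {
        int parallelism = Runtime.getRuntime().availableProcessors();
        Scheduler scheduler = Schedulers.newParallel("rails", parallelism);
        try {
            long count = ParallelFlux.from(Flux.range(1, 1_000_000), parallelism)
                                     .runOn(scheduler)  // each rail gets its own worker
                                     .map(v -> v + 1)
                                     .sequential()      // merge the rails back into one Flux
                                     .count()
                                     .block();
            System.out.println(count); // 1000000
        }
        finally {
            scheduler.dispose();
        }
    }
}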
Use of reactor.core.scheduler.Scheduler in project reactor-core by reactor.
From the class ParallelMergeOrderedTest, method reorderingByIndex.
@Test
public void reorderingByIndex() {
    final int LOOPS = 100;
    final int PARALLELISM = 2;
    final List<Integer> ordered = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
    int notShuffled = 0;
    for (int i = 0; i < LOOPS; i++) {
        final Scheduler SCHEDULER = Schedulers.newParallel("test", PARALLELISM);
        final List<Integer> disordered = Collections.synchronizedList(new ArrayList<>());

        List<Integer> reordered = Flux.fromIterable(ordered)
                                      .hide()
                                      .index()
                                      .parallel(PARALLELISM)
                                      .runOn(SCHEDULER)
                                      .doOnNext(t2 -> disordered.add(t2.getT2()))
                                      .ordered(Comparator.comparing(Tuple2::getT1))
                                      .map(Tuple2::getT2)
                                      .collectList()
                                      .block();
        SCHEDULER.dispose();

        assertThat(reordered).containsExactlyElementsOf(ordered);
        assertThat(disordered).containsExactlyInAnyOrderElementsOf(ordered);
        try {
            assertThat(disordered).doesNotContainSequence(ordered);
            System.out.println("parallel shuffled the collection into " + disordered);
            break;
        }
        catch (AssertionError e) {
            notShuffled++;
        }
    }
    if (notShuffled > 0) {
        System.out.println("not shuffled loops: " + notShuffled);
    }
    assertThat(LOOPS - notShuffled).as("at least one run shuffled").isGreaterThan(0);
}
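This test shows how ordered(Comparator) restores the source sequence after parallel processing scrambles it, keyed by the index attached before the parallel split. Below is a minimal standalone sketch of that technique, assuming only reactor-core on the classpath, with illustrative names.

import java.util.Comparator;
import java.util.List;

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import reactor.util.function.Tuple2;

public class OrderedMergeSketch {

    public static void main(String[] args) {
        Scheduler scheduler = Schedulers.newParallel("ordered", 2);
        try {
            List<Integer> result = Flux.range(1, 10)
                                       .index()                                      // Tuple2<index, value>
                                       .parallel(2)
                                       .runOn(scheduler)
                                       .map(t2 -> t2.mapT2(v -> v * 10))             // work may complete out of order
                                       .ordered(Comparator.comparing(Tuple2::getT1)) // merge back by index
                                       .map(Tuple2::getT2)
                                       .collectList()
                                       .block();
            System.out.println(result); // [10, 20, ..., 100] in source order
        }
        finally {
            scheduler.dispose();
        }
    }
}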