Example usage of reactor.core.scheduler.Scheduler from the reactor-core project: class FluxWindowTimeoutTest, method rejectedDuringLifecycle.
@Test
public void rejectedDuringLifecycle() {
// Flipped to true once the first window reaches the downstream consumer;
// from that point every delayed schedule attempt is rejected, simulating a
// scheduler being shut down while windowTimeout is mid-flight.
AtomicBoolean reject = new AtomicBoolean();
// Test scheduler: direct (non-delayed) scheduling always rejects; delayed
// worker scheduling delegates to a real elastic worker until `reject` is set.
Scheduler testScheduler = new Scheduler() {
@Override
public Disposable schedule(Runnable task) {
// Direct scheduling is unconditionally rejected.
throw Exceptions.failWithRejected();
}
@Override
public Worker createWorker() {
return new Worker() {
// Real worker backing the delayed schedules before rejection kicks in.
Worker delegate = Schedulers.elastic().createWorker();
@Override
public Disposable schedule(Runnable task) {
// Immediate worker scheduling is also unconditionally rejected.
throw Exceptions.failWithRejected();
}
@Override
public Disposable schedule(Runnable task, long delay, TimeUnit unit) {
// Delegate until the test flips `reject`, then start rejecting the
// timeout tasks windowTimeout schedules per window.
if (reject.get())
throw Exceptions.failWithRejected();
return delegate.schedule(task, delay, unit);
}
@Override
public void dispose() {
delegate.dispose();
}
};
}
};
// First window consumption sets `reject`, so the next window's timeout task
// is rejected and the rejection must surface as RejectedExecutionException.
StepVerifier.create(Flux.range(1, 3).hide().windowTimeout(2, Duration.ofSeconds(2), testScheduler).concatMap(w -> {
reject.set(true);
return w.collectList();
})).verifyError(RejectedExecutionException.class);
}
Example usage of reactor.core.scheduler.Scheduler from the reactor-core project: class HooksTest, method parallelModeFused.
@Test
public void parallelModeFused() {
	// Install global hooks: operator debug (assembly tracing) and a per-operator
	// callback that logs each assembled operator's name.
	Hooks.onOperatorDebug();
	Hooks.onEachOperator(p -> {
		System.out.println(Scannable.from(p).operatorName());
		return p;
	});
	// Hot source: 1001 values (1 initial + 1000 repeats), shared via publish().
	Flux<Integer> source = Mono.just(1).flux().repeat(1000).publish().autoConnect();
	// At least 8 rails regardless of the host's core count.
	int ncpu = Math.max(8, Runtime.getRuntime().availableProcessors());
	Scheduler scheduler = Schedulers.newParallel("test", ncpu);
	try {
		Flux<Integer> result = ParallelFlux.from(source, ncpu)
				.runOn(scheduler)
				.map(v -> v + 1)
				.log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE)
				.sequential();
		AssertSubscriber<Integer> ts = AssertSubscriber.create();
		result.subscribe(ts);
		ts.await(Duration.ofSeconds(10));
		ts.assertSubscribed().assertValueCount(1000).assertComplete().assertNoError();
	}
	finally {
		scheduler.dispose();
		// Hooks are global, mutable state: reset them so the debug hook and the
		// onEachOperator logging do not leak into subsequently-run tests.
		Hooks.resetOnEachOperator();
		Hooks.resetOnOperatorDebug();
	}
}
Example usage of reactor.core.scheduler.Scheduler from the reactor-core project: class FluxProcessorTest, method testEmitter.
@Test
public void testEmitter() throws Throwable {
	FluxProcessor<Integer, Integer> processor = EmitterProcessor.create();
	int n = 100_000;
	int subs = 4;
	// Each subscriber sees n values plus one completion signal.
	final CountDownLatch latch = new CountDownLatch((n + 1) * subs);
	// Use a dedicated scheduler instance: Schedulers.single() returns the
	// globally cached singleton, and disposing it would break every other test
	// (or production code path) that relies on the shared instance.
	Scheduler c = Schedulers.newSingle("testEmitter");
	try {
		for (int i = 0; i < subs; i++) {
			processor.publishOn(c).limitRate(1).subscribe(d -> latch.countDown(), null, latch::countDown);
		}
		FluxSink<Integer> session = processor.sink();
		for (int i = 0; i < n; i++) {
			// Busy-wait until at least one subscriber has outstanding demand,
			// so next() never overflows the rate-limited consumers.
			while (session.requestedFromDownstream() == 0) {
			}
			session.next(i);
		}
		session.complete();
		boolean waited = latch.await(5, TimeUnit.SECONDS);
		Assert.assertTrue("latch : " + latch.getCount(), waited);
	}
	finally {
		// Dispose even when an assertion above throws, so the worker thread
		// does not leak across tests.
		c.dispose();
	}
}
Example usage of reactor.core.scheduler.Scheduler from the reactor-core project: class FluxPublishOnTest, method crossRangePerfDefault.
@Test
public void crossRangePerfDefault() {
	final int count = 1000;
	AssertSubscriber<Integer> subscriber = AssertSubscriber.create();
	Scheduler scheduler = Schedulers.fromExecutorService(exec);
	// Each of the `count` outer values fans out into a 2-element inner range,
	// using bounded concurrency (128) and a small prefetch (32).
	Flux<Integer> source = Flux.range(1, count)
			.flatMap(v -> Flux.range(v, 2), false, 128, 32);
	source.publishOn(scheduler).subscribe(subscriber);
	boolean terminated = subscriber.await(Duration.ofSeconds(10)).isTerminated();
	if (!terminated) {
		subscriber.cancel();
	}
	// 2 inner values per outer value, no error, clean completion.
	subscriber.assertValueCount(count * 2).assertNoError().assertComplete();
}
Example usage of reactor.core.scheduler.Scheduler from the reactor-core project: class FluxMergeSequentialTest, method mergeSequentialLargeUnorderedEach100.
@Test
public void mergeSequentialLargeUnorderedEach100() {
	Scheduler scheduler = Schedulers.elastic();
	AtomicBoolean comparisonFailure = new AtomicBoolean();
	// Delay each value so every pack of 100 completes in inverse order, then
	// verify flatMapSequential still re-emits in the original order.
	Flux<Integer> sequenced = Flux.range(0, 500)
			.flatMapSequential(idx -> {
				// ensure each pack of 100 is delayed in inverse order
				Duration sleep = Duration.ofMillis(600 - idx % 100);
				return Mono.delay(sleep).then(Mono.just(idx)).subscribeOn(scheduler);
			});
	long count = sequenced
			.zipWith(Flux.range(0, Integer.MAX_VALUE))
			.doOnNext(pair -> {
				// T2 is the expected position; any mismatch flags an ordering failure.
				if (!Objects.equals(pair.getT1(), pair.getT2())) {
					comparisonFailure.set(true);
				}
			})
			.count()
			.block();
	assertEquals(500L, count);
	assertFalse(comparisonFailure.get());
}
Aggregations