Usage of org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult in the Infinispan project: the startSegmentPublisher method of the ClusterPublisherManagerImpl class.
/**
 * Starts a segment-based publisher request, splitting the requested {@code segments} between the
 * local node and each remote owner, invoking every target and funnelling all partial
 * {@link PublisherResult}s into the given {@code flowableProcessor} via a single shared
 * {@code BiConsumer} (so only one consumer instance is allocated per request chain).
 * <p>
 * NOTE(review): assumes {@code determineSegmentTargets} maps each target address to the segments
 * it owns in {@code topology} — confirm against that helper's implementation.
 *
 * @param parallelPublisher whether each target may process its segments in parallel
 * @param segments          the full set of segments this request must cover
 * @param ctx               invocation context; entries already looked up in it are handled
 *                          locally via {@code handleContextInvocation} and excluded from targets
 * @param explicitFlags     flag bits propagated to local and remote invocations
 * @param deliveryGuarantee delivery guarantee each invocation must honor
 * @param composedType      adapter that builds the local/remote invocations for keys or entries
 * @param transformer       reduces a publisher of values into a single result per target
 * @param finalizer         reduces the per-target results into the final value
 * @param flowableProcessor sink that receives results (or the error/retry outcome)
 */
private <I, R> void startSegmentPublisher(boolean parallelPublisher, IntSet segments, InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee, ComposedType<K, I, R> composedType, Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer, Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer, FlowableProcessor<R> flowableProcessor) {
   LocalizedCacheTopology topology = distributionManager.getCacheTopology();
   Address localAddress = topology.getLocalAddress();
   Map<Address, IntSet> targets = determineSegmentTargets(topology, segments, localAddress);
   int targetSize = targets.size();
   // No owners for any requested segment - complete immediately with an "empty" result
   if (targetSize == 0) {
      handleNoTargets(transformer, flowableProcessor);
      return;
   }
   // used to determine that last parallel completion, to either complete or retry
   AtomicInteger parallelCount;
   boolean useContext = ctx != null && ctx.lookedUpEntriesCount() > 0;
   Map<Address, Set<K>> keysToExcludeByAddress;
   if (useContext) {
      // +1 because the context entries are processed as an extra pseudo-target below
      parallelCount = new AtomicInteger(targetSize + 1);
      // Keys present in the context must not be published again by their owners
      keysToExcludeByAddress = determineKeyTargets(topology, (Set<K>) ctx.getLookedUpEntries().keySet(), localAddress, segments, null);
   } else {
      parallelCount = new AtomicInteger(targetSize);
      keysToExcludeByAddress = Collections.emptyMap();
   }
   // This way we only have to allocate 1 per request chain
   BiConsumer<PublisherResult<R>, Throwable> biConsumer = new SegmentSpecificConsumer<>(flowableProcessor, parallelCount, topology.getTopologyId(), parallelPublisher, ctx, explicitFlags, deliveryGuarantee, composedType, transformer, finalizer);
   IntSet localSegments = targets.remove(localAddress);
   // If any targets left, they are all remote
   if (!targets.isEmpty()) {
      // We submit the remote ones first as they will not block at all, just to send remote tasks
      for (Map.Entry<Address, IntSet> remoteTarget : targets.entrySet()) {
         Address remoteAddress = remoteTarget.getKey();
         IntSet remoteSegments = remoteTarget.getValue();
         ReductionPublisherRequestCommand<K> command = composedType.remoteInvocation(parallelPublisher, remoteSegments, null, keysToExcludeByAddress.get(remoteAddress), explicitFlags, deliveryGuarantee, transformer, finalizer);
         // Pin the command to the topology we computed targets from, so stale responses are detectable
         command.setTopologyId(topology.getTopologyId());
         CompletionStage<PublisherResult<R>> stage = rpcManager.invokeCommand(remoteAddress, command, new SegmentPublisherResultCollector<>(remoteSegments), rpcManager.getSyncRpcOptions());
         stage.whenComplete(biConsumer);
      }
   }
   if (localSegments != null) {
      CompletionStage<PublisherResult<R>> localStage = composedType.localInvocation(parallelPublisher, localSegments, null, keysToExcludeByAddress.get(localAddress), explicitFlags, deliveryGuarantee, transformer, finalizer);
      if (log.isTraceEnabled()) {
         // Make sure the trace occurs before response is processed
         localStage = localStage.whenComplete((results, t) -> {
            if (t != null) {
               log.tracef(t, "Received exception while processing segments %s from %s", localSegments, localAddress);
            } else {
               log.tracef("Result was: %s for segments %s from %s with %s suspected segments", results.getResult(), localSegments, localAddress, results.getSuspectedSegments());
            }
         });
      }
      // Map to the same collector, so we can reuse the same BiConsumer
      localStage.whenComplete(biConsumer);
   }
   if (useContext) {
      // Process the context-resident entries as the extra target counted in parallelCount above
      handleContextInvocation(segments, null, ctx, composedType, transformer, biConsumer);
   }
}
Usage of org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult in the Infinispan project: the startKeyPublisher method of the ClusterPublisherManagerImpl class.
/**
 * Starts a key-based publisher request, splitting {@code keysToInclude} between the local node
 * and each remote owner, invoking every target and funnelling all partial
 * {@link PublisherResult}s into the given {@code flowableProcessor} via a single shared
 * {@code BiConsumer} (so only one consumer instance is allocated per request chain).
 * <p>
 * Mirrors {@code startSegmentPublisher}, but targets explicit keys instead of segments.
 *
 * @param parallelPublisher whether each target may process its keys in parallel
 * @param segments          segments used to scope key-to-owner resolution and context handling
 * @param keysToInclude     the keys this request must publish
 * @param ctx               invocation context; entries already looked up in it are handled
 *                          locally via {@code handleContextInvocation}
 * @param explicitFlags     flag bits propagated to local and remote invocations
 * @param deliveryGuarantee delivery guarantee each invocation must honor
 * @param composedType      adapter that builds the local/remote invocations for keys or entries
 * @param transformer       reduces a publisher of values into a single result per target
 * @param finalizer         reduces the per-target results into the final value
 * @param flowableProcessor sink that receives results (or the error/retry outcome)
 */
private <I, R> void startKeyPublisher(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude, InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee, ComposedType<K, I, R> composedType, Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer, Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer, FlowableProcessor<R> flowableProcessor) {
   LocalizedCacheTopology topology = distributionManager.getCacheTopology();
   Address localAddress = topology.getLocalAddress();
   // This excludes the keys from the various address targets
   Map<Address, Set<K>> keyTargets = determineKeyTargets(topology, keysToInclude, localAddress, segments, ctx);
   int keyTargetSize = keyTargets.size();
   // No owners for any requested key - complete immediately with an "empty" result
   if (keyTargetSize == 0) {
      handleNoTargets(transformer, flowableProcessor);
      return;
   }
   // Used to determine the last parallel completion, to either complete or retry
   AtomicInteger parallelCount;
   boolean useContext = ctx != null && ctx.lookedUpEntriesCount() > 0;
   if (useContext) {
      // +1 because the context entries are processed as an extra pseudo-target below
      parallelCount = new AtomicInteger(keyTargetSize + 1);
   } else {
      parallelCount = new AtomicInteger(keyTargetSize);
   }
   // This way we only have to allocate 1 per request chain
   BiConsumer<PublisherResult<R>, Throwable> biConsumer = new KeyBiConsumer<>(flowableProcessor, parallelCount, topology.getTopologyId(), parallelPublisher, explicitFlags, deliveryGuarantee, composedType, transformer, finalizer);
   Set<K> localKeys = keyTargets.remove(localAddress);
   // If any targets left, they are all remote
   if (!keyTargets.isEmpty()) {
      // We submit the remote ones first as they will not block at all, just to send remote tasks
      for (Map.Entry<Address, Set<K>> remoteTarget : keyTargets.entrySet()) {
         Address remoteAddress = remoteTarget.getKey();
         Set<K> remoteKeys = remoteTarget.getValue();
         ReductionPublisherRequestCommand<K> command = composedType.remoteInvocation(parallelPublisher, null, remoteKeys, null, explicitFlags, deliveryGuarantee, transformer, finalizer);
         // Pin the command to the topology we computed targets from, so stale responses are detectable
         command.setTopologyId(topology.getTopologyId());
         CompletionStage<PublisherResult<R>> stage = rpcManager.invokeCommand(remoteAddress, command, new KeyPublisherResultCollector<>(remoteKeys), rpcManager.getSyncRpcOptions());
         stage.whenComplete(biConsumer);
      }
   }
   if (localKeys != null) {
      CompletionStage<PublisherResult<R>> localStage = composedType.localInvocation(parallelPublisher, null, localKeys, null, explicitFlags, deliveryGuarantee, transformer, finalizer);
      if (log.isTraceEnabled()) {
         // Make sure the trace occurs before response is processed
         localStage = localStage.whenComplete((results, t) -> {
            if (t != null) {
               log.tracef(t, "Received exception while processing keys %s from %s", localKeys, localAddress);
            } else {
               log.tracef("Result was: %s for keys %s from %s with %s suspected keys", results.getResult(), localKeys, localAddress, results.getSuspectedKeys());
            }
         });
      }
      // Map to the same collector, so we can reuse the same BiConsumer
      localStage.whenComplete(biConsumer);
   }
   if (useContext) {
      // Process the context-resident entries as the extra target counted in parallelCount above
      handleContextInvocation(segments, keysToInclude, ctx, composedType, transformer, biConsumer);
   }
}
Usage of org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult in the Infinispan project: the testWithAsyncOperation method of the SimpleLocalPublisherManagerTest class.
/**
 * Verifies that a reduction whose transformer performs asynchronous work (hopping each element
 * to the blocking thread pool) still yields the complete, correct result set for every
 * combination of delivery guarantee, parallel/sequential processing, and key vs. entry
 * publishing supplied by the data provider.
 *
 * @param deliveryGuarantee the delivery guarantee under test
 * @param isParallel        whether the publisher processes segments in parallel
 * @param isEntry           {@code true} to reduce cache entries, {@code false} to reduce keys
 */
@Test(dataProvider = "GuaranteeParallelEntry")
public void testWithAsyncOperation(DeliveryGuarantee deliveryGuarantee, boolean isParallel, boolean isEntry) {
   Cache<Integer, String> cache = cache(0);
   Map<Integer, String> inserted = insert(cache);
   BlockingManager blockingManager = TestingUtil.extractComponent(cache, BlockingManager.class);
   LocalPublisherManager<Integer, String> lpm = lpm(cache);
   IntSet allSegments = IntSets.immutableRangeSet(SEGMENT_COUNT);
   CompletionStage<PublisherResult<Set<Object>>> stage;
   Consumer<Object> assertConsumer;
   Collector<Object, ?, Set<Object>> collector = Collectors.toSet();
   // Merges per-segment partial sets into a single result set
   BiFunction<Set<Object>, Set<Object>, Set<Object>> reduceBiFunction = (left, right) -> {
      left.addAll(right);
      return left;
   };
   // Simulates an async per-element operation by round-tripping each value through the blocking pool
   io.reactivex.rxjava3.functions.Function<Object, Single<Object>> sleepOnBlockingPoolFunction = value -> Single.fromCompletionStage(blockingManager.supplyBlocking(() -> value, "test-blocking-thread"));
   if (isEntry) {
      // Fixed: the entry branch must invoke entryReduction (the branches previously had
      // keyReduction/entryReduction swapped, so each assertConsumer checked the wrong element type)
      stage = lpm.entryReduction(isParallel, allSegments, null, null, 0L, deliveryGuarantee, publisher -> Flowable.fromPublisher(publisher).concatMapSingle(sleepOnBlockingPoolFunction).collect(collector).toCompletionStage(), publisher -> Flowable.fromPublisher(publisher).reduce(reduceBiFunction).toCompletionStage(Collections.emptySet()));
      assertConsumer = obj -> {
         Map.Entry<Object, Object> entry = (Map.Entry) obj;
         Object value = inserted.get(entry.getKey());
         assertEquals(value, entry.getValue());
      };
   } else {
      // Fixed: the key branch must invoke keyReduction (see note above)
      stage = lpm.keyReduction(isParallel, allSegments, null, null, 0L, deliveryGuarantee, publisher -> Flowable.fromPublisher(publisher).concatMapSingle(sleepOnBlockingPoolFunction).collect(collector).toCompletionStage(), publisher -> Flowable.fromPublisher(publisher).reduce(reduceBiFunction).toCompletionStage(Collections.emptySet()));
      assertConsumer = obj -> assertTrue(inserted.containsKey(obj));
   }
   DistributionManager dm = TestingUtil.extractComponent(cache, DistributionManager.class);
   IntSet localSegments = dm.getCacheTopology().getLocalReadSegments();
   // Only the keys hashing to locally-read segments are published by the local manager
   int expected = SimpleClusterPublisherManagerTest.findHowManyInSegments(inserted.size(), localSegments, TestingUtil.extractComponent(cache, KeyPartitioner.class));
   Set<Object> results = CompletionStages.join(stage).getResult();
   assertEquals(expected, results.size());
   results.forEach(assertConsumer);
}
End of aggregated usage examples.