Example usage of org.infinispan.util.concurrent.CommandAckCollector.MultiTargetCollector in the Infinispan project:
the method handleWriteManyOnOrigin of the class ScatteredDistributionInterceptor.
/**
 * Handles a multi-key write command on the originating node.
 * <p>
 * Splits the command's items by the primary owner of each key, executes the slice owned by
 * this node through the interceptor chain, and sends one copy of the command to every remote
 * primary. Values returned by the remote primaries are written into the local context and
 * committed, i.e. this node stores them as the backup copy.
 *
 * @param ctx     invocation context used to wrap and commit the backup entries
 * @param command the original multi-key write command
 * @param helper  strategy object that knows how to split/copy the command and how to
 *                interpret its per-owner item container
 * @return an async value backed by a future that completes once the local slice and all
 *         remote responses have been processed
 * @throws AllOwnersLostException if some key has no primary owner in the current topology
 */
private <C extends WriteCommand, Container, Item> Object handleWriteManyOnOrigin(InvocationContext ctx, C command, WriteManyCommandHelper<C, Container, Item> helper) {
   LocalizedCacheTopology cacheTopology = checkTopology(command);
   // Bucket the command's items by the primary owner of their key.
   Map<Address, Container> remoteEntries = new HashMap<>();
   for (Item item : helper.getItems(command)) {
      Object key = helper.item2key(item);
      DistributionInfo info = cacheTopology.getDistribution(key);
      Address primary = info.primary();
      if (primary == null) {
         // No primary owner for this key's segment in the current topology.
         throw AllOwnersLostException.INSTANCE;
      } else {
         Container currentEntries = remoteEntries.computeIfAbsent(primary, k -> helper.newContainer());
         helper.accumulate(currentEntries, item);
      }
   }
   // Previous values are collected only when the command needs to load/return them.
   Object[] results = command.loadType() == DONT_LOAD ? null : new Object[command.getAffectedKeys().size()];
   // Counter is taken before the local slice is removed below, so it counts every distinct
   // primary (including this node); the local handler and each remote response count down once.
   MergingCompletableFuture<Object> allFuture = new SyncMergingCompletableFuture<>(remoteEntries.size(), results, helper::transformResult);
   int offset = 0;
   // Execute the slice owned by this node locally through the rest of the chain.
   Container localEntries = remoteEntries.remove(rpcManager.getAddress());
   if (localEntries != null) {
      // NOTE(review): the return value is discarded and 'offset' is not advanced for the
      // local slice — presumably LocalWriteManyHandler positions local results itself;
      // confirm this is not meant to be 'offset += helper.containerSize(localEntries)'.
      helper.containerSize(localEntries);
      C localCommand = helper.copyForLocal(command, localEntries);
      localCommand.setTopologyId(command.getTopologyId());
      // Handler commits the local entries and counts allFuture down when the chain finishes.
      LocalWriteManyHandler handler = new LocalWriteManyHandler(allFuture, localCommand.getAffectedKeys(), cacheTopology);
      invokeNextAndFinally(ctx, localCommand, handler);
   }
   // This will be null in a non-biased variant
   MultiTargetCollector multiTargetCollector = createMultiTargetCollector(command, remoteEntries.size());
   for (Map.Entry<Address, Container> ownerEntry : remoteEntries.entrySet()) {
      Address owner = ownerEntry.getKey();
      // TODO: copyForLocal just creates the command with given entries, not using the segment-aware map
      Container container = ownerEntry.getValue();
      C toPrimary = helper.copyForLocal(command, container);
      toPrimary.setTopologyId(command.getTopologyId());
      CompletionStage<ValidResponse> rpcFuture = manyWriteOnRemotePrimary(owner, toPrimary, multiTargetCollector);
      // Capture this target's starting index into the merged results array; the lambda
      // below needs an effectively-final copy.
      int myOffset = offset;
      offset += helper.containerSize(container);
      rpcFuture.whenComplete((response, t) -> {
         if (t != null) {
            allFuture.completeExceptionally(t);
            return;
         }
         Object responseValue = response.getResponseValue();
         // Note: we could use PrimaryResponseHandler, but we would have to add the reference to allFuture, offset...
         InternalCacheValue[] values;
         try {
            if (command.loadType() == DONT_LOAD) {
               // Without loading, the primary replies with just the values to back up.
               if (!(responseValue instanceof InternalCacheValue[])) {
                  allFuture.completeExceptionally(new CacheException("Response from " + owner + ": expected InternalCacheValue[] but it is " + responseValue));
                  return;
               }
               values = (InternalCacheValue[]) responseValue;
            } else {
               if (!(responseValue instanceof Object[]) || (((Object[]) responseValue).length != 2)) {
                  allFuture.completeExceptionally(new CacheException("Response from " + owner + ": expected Object[2] but it is " + responseValue));
                  return;
               }
               // We use Object[] { InternalCacheValue[], Object[] } structure to get benefit of same-type array marshalling
               // TODO optimize returning entry itself
               // Note: some interceptors relying on the return value *could* have a problem interpreting this
               values = (InternalCacheValue[]) ((Object[]) responseValue)[0];
               // Merge this target's return values into the shared results array at its offset.
               MergingCompletableFuture.moveListItemsToFuture(((Object[]) responseValue)[1], allFuture, myOffset);
            }
            AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
            // Synchronize on the future so we never commit entries after it has already
            // completed (e.g. exceptionally via another response).
            synchronized (allFuture) {
               if (allFuture.isDone()) {
                  return;
               }
               int i = 0;
               // The primary returns values in the iteration order of this target's container.
               for (Object key : helper.toKeys(container)) {
                  // we will serve as the backup
                  InternalCacheEntry ice = values[i++].toInternalCacheEntry(key);
                  entryFactory.wrapExternalEntry(ctx, key, ice, true, true);
                  RepeatableReadEntry entry = (RepeatableReadEntry) ctx.lookupEntry(key);
                  // we don't care about setCreated() since backup owner should not fire listeners
                  entry.setChanged(true);
                  aggregateCompletionStage.dependsOn(commitSingleEntryIfNewer(entry, ctx, command));
                  if (entry.isCommitted() && !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
                     scheduleKeyInvalidation(entry.getKey(), entry.getMetadata().version(), entry.isRemoved());
                  }
               }
               assert i == values.length;
            }
            // Count this target down only after all of its entry commits have completed.
            aggregateCompletionStage.freeze().thenRun(allFuture::countDown);
         } catch (Throwable t2) {
            allFuture.completeExceptionally(t2);
         }
      });
   }
   return asyncValue(allFuture);
}
Aggregations