Use of org.infinispan.distribution.DistributionInfo in project infinispan by infinispan.
In the class FunctionalTxTest, the method testBeforeTopology:
   private void testBeforeTopology(BiFunction<FunctionalMap.ReadWriteMap<String, Integer>, String, Integer> op, int expectedIncrement) throws Exception {
      cache(0).put("key", 1);
      // Block on the receiver side. We cannot block the StateResponseCommand on the sender side, because
      // the InternalCacheEntries in its state are the very instances stored in the DataContainer;
      // if the command were blocked on the sender, applying the transaction below would mutate
      // the command itself.
      BlockingStateConsumer bsc2 = TestingUtil.wrapComponent(cache(2), StateConsumer.class, BlockingStateConsumer::new);
      tm(2).begin();
      FunctionalMap.ReadWriteMap<String, Integer> rw = ReadWriteMapImpl.create(FunctionalMapImpl.create(this.<String, Integer>cache(2).getAdvancedCache()));
      assertEquals(Integer.valueOf(1), op.apply(rw, "key"));
      Transaction tx = tm(2).suspend();

      chf.setOwnerIndexes(0, 2);
      EmbeddedCacheManager cm = createClusteredCacheManager(false, GlobalConfigurationBuilder.defaultClusteredBuilder(), cb, new TransportFlags());
      registerCacheManager(cm);
      Future<?> future = fork(() -> {
         cache(3);
      });

      bsc2.await();

      DistributionInfo distributionInfo = cache(2).getAdvancedCache().getDistributionManager().getCacheTopology().getDistribution("key");
      assertFalse(distributionInfo.isReadOwner());
      assertTrue(distributionInfo.isWriteBackup());

      tm(2).resume(tx);
      tm(2).commit();

      bsc2.unblock();
      future.get(10, TimeUnit.SECONDS);

      InternalCacheEntry<Object, Object> ice = cache(2).getAdvancedCache().getDataContainer().get("key");
      assertEquals("Current ICE: " + ice, 1 + expectedIncrement, ice.getValue());
   }
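The ownership check in the middle of this test is the canonical way to query DistributionInfo for a single key. As a minimal standalone sketch, assuming a running EmbeddedCacheManager and a hypothetical cache named "dist" (neither is part of the test above):

import org.infinispan.AdvancedCache;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.manager.EmbeddedCacheManager;

public final class OwnershipCheck {
   // Prints how the local node relates to the given key.
   static void describeOwnership(EmbeddedCacheManager cm, Object key) {
      AdvancedCache<Object, Object> cache = cm.<Object, Object>getCache("dist").getAdvancedCache();
      LocalizedCacheTopology topology = cache.getDistributionManager().getCacheTopology();
      DistributionInfo info = topology.getDistribution(key);
      System.out.printf("key=%s primary=%s readOwner=%b writeBackup=%b%n",
            key, info.primary(), info.isReadOwner(), info.isWriteBackup());
   }
}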
Use of org.infinispan.distribution.DistributionInfo in project infinispan by infinispan.
In the class ClusteringInterceptor, the method visitTouchCommand:
   @Override
   public Object visitTouchCommand(InvocationContext ctx, TouchCommand command) throws Throwable {
      if (command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_REMOTE_LOOKUP)) {
         return invokeNext(ctx, command);
      }
      LocalizedCacheTopology cacheTopology = checkTopologyId(command);
      DistributionInfo info = cacheTopology.getSegmentDistribution(command.getSegment());
      // In scattered mode any node could be a backup, so we have to touch all members
      List<Address> owners = isScattered ? cacheTopology.getActualMembers() : info.readOwners();
      if (touchMode == TouchMode.ASYNC) {
         if (ctx.isOriginLocal()) {
            // Send to all the owners
            rpcManager.sendToMany(owners, command, DeliverOrder.NONE);
         }
         return invokeNext(ctx, command);
      }
      if (info.isPrimary()) {
         AbstractTouchResponseCollector collector = isScattered ? ScatteredTouchResponseCollector.INSTANCE : TouchResponseCollector.INSTANCE;
         CompletionStage<Boolean> remoteInvocation = rpcManager.invokeCommand(owners, command, collector, rpcManager.getSyncRpcOptions());
         return invokeNextThenApply(ctx, command, (rCtx, rCommand, rValue) -> {
            Boolean touchedLocally = (Boolean) rValue;
            if (touchedLocally) {
               return asyncValue(remoteInvocation);
            }
            // If the primary can't touch the entry, the other owners don't matter
            return Boolean.FALSE;
         });
      } else if (ctx.isOriginLocal()) {
         // Send to the primary owner
         CompletionStage<ValidResponse> remoteInvocation = rpcManager.invokeCommand(info.primary(), command, SingleResponseCollector.validOnly(), rpcManager.getSyncRpcOptions());
         return asyncValue(remoteInvocation).thenApply(ctx, command, (rCtx, rCommand, rResponse) -> ((ValidResponse) rResponse).getResponseValue());
      }
      return invokeNext(ctx, command);
   }
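The branching above reduces to one question asked of DistributionInfo: is the local node the primary for this segment? A compact sketch of the same routing decision, detached from the interceptor machinery (class and method names here are illustrative, not Infinispan API):

import java.util.List;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.remoting.transport.Address;

final class TouchRouting {
   // Mirrors the sync branches of visitTouchCommand: the primary fans out to all
   // read owners, while a non-primary origin forwards to the primary alone.
   static List<Address> touchTargets(DistributionInfo info) {
      if (info.isPrimary()) {
         return info.readOwners();
      }
      return List.of(info.primary());
   }
}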
Use of org.infinispan.distribution.DistributionInfo in project infinispan by infinispan.
In the class ScatteredDistributionInterceptor, the method visitGetAllCommand:
   @Override
   public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable {
      LocalizedCacheTopology cacheTopology = checkTopology(command);
      if (command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_REMOTE_LOOKUP | FlagBitSets.SKIP_OWNERSHIP_CHECK)) {
         return invokeNext(ctx, command);
      }
      if (ctx.isOriginLocal()) {
         Map<Address, List<Object>> remoteKeys = new HashMap<>();
         for (Object key : command.getKeys()) {
            if (ctx.lookupEntry(key) != null) {
               continue;
            }
            DistributionInfo info = cacheTopology.getDistribution(key);
            if (info.primary() == null) {
               throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
            } else if (!info.isPrimary()) {
               remoteKeys.computeIfAbsent(info.primary(), k -> new ArrayList<>()).add(key);
            }
         }

         if (remoteKeys.isEmpty()) {
            return invokeNext(ctx, command);
         }
         ClusteredGetAllFuture sync = new ClusteredGetAllFuture(remoteKeys.size());
         for (Map.Entry<Address, List<Object>> remote : remoteKeys.entrySet()) {
            List<Object> keys = remote.getValue();
            ClusteredGetAllCommand clusteredGetAllCommand = cf.buildClusteredGetAllCommand(keys, command.getFlagsBitSet(), null);
            clusteredGetAllCommand.setTopologyId(command.getTopologyId());
            SingletonMapResponseCollector collector = SingletonMapResponseCollector.ignoreLeavers();
            CompletionStage<Map<Address, Response>> rpcFuture = rpcManager.invokeCommand(remote.getKey(), clusteredGetAllCommand, collector, rpcManager.getSyncRpcOptions());
            rpcFuture.whenComplete((responseMap, throwable) -> handleGetAllResponse(responseMap, throwable, ctx, keys, sync));
         }
         return asyncInvokeNext(ctx, command, sync);
      } else {
         // Remote origin: if any entry is missing locally, the originator must retry elsewhere
         for (Object key : command.getKeys()) {
            if (ctx.lookupEntry(key) == null) {
               return UnsureResponse.INSTANCE;
            }
         }
         return invokeNext(ctx, command);
      }
   }
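The local-origin branch is essentially a group-by: every key the invocation context cannot resolve locally is bucketed under its primary owner, and one remote command is sent per bucket. A standalone sketch of that bucketing step, assuming a LocalizedCacheTopology is at hand (the class name is made up):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.remoting.transport.Address;

final class KeyBucketing {
   // Buckets keys by their primary owner, skipping keys this node is primary for.
   // A null primary means the topology is stale; the interceptor above retries
   // in that case, which this sketch omits.
   static Map<Address, List<Object>> byPrimary(LocalizedCacheTopology topology, Iterable<Object> keys) {
      Map<Address, List<Object>> buckets = new HashMap<>();
      for (Object key : keys) {
         DistributionInfo info = topology.getDistribution(key);
         if (!info.isPrimary()) {
            buckets.computeIfAbsent(info.primary(), k -> new ArrayList<>()).add(key);
         }
      }
      return buckets;
   }
}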
Use of org.infinispan.distribution.DistributionInfo in project infinispan by infinispan.
In the class ScatteredDistributionInterceptor, the method handleWriteManyOnOrigin:
   private <C extends WriteCommand, Container, Item> Object handleWriteManyOnOrigin(InvocationContext ctx, C command, WriteManyCommandHelper<C, Container, Item> helper) {
      LocalizedCacheTopology cacheTopology = checkTopology(command);
      Map<Address, Container> remoteEntries = new HashMap<>();
      for (Item item : helper.getItems(command)) {
         Object key = helper.item2key(item);
         DistributionInfo info = cacheTopology.getDistribution(key);
         Address primary = info.primary();
         if (primary == null) {
            throw AllOwnersLostException.INSTANCE;
         } else {
            Container currentEntries = remoteEntries.computeIfAbsent(primary, k -> helper.newContainer());
            helper.accumulate(currentEntries, item);
         }
      }

      Object[] results = command.loadType() == DONT_LOAD ? null : new Object[command.getAffectedKeys().size()];
      MergingCompletableFuture<Object> allFuture = new SyncMergingCompletableFuture<>(remoteEntries.size(), results, helper::transformResult);

      int offset = 0;
      Container localEntries = remoteEntries.remove(rpcManager.getAddress());
      if (localEntries != null) {
         // Local results occupy the first slots of the merged results array; remote offsets start after them
         offset += helper.containerSize(localEntries);
         C localCommand = helper.copyForLocal(command, localEntries);
         localCommand.setTopologyId(command.getTopologyId());
         LocalWriteManyHandler handler = new LocalWriteManyHandler(allFuture, localCommand.getAffectedKeys(), cacheTopology);
         invokeNextAndFinally(ctx, localCommand, handler);
      }

      // This will be null in the non-biased variant
      MultiTargetCollector multiTargetCollector = createMultiTargetCollector(command, remoteEntries.size());
      for (Map.Entry<Address, Container> ownerEntry : remoteEntries.entrySet()) {
         Address owner = ownerEntry.getKey();
         // TODO: copyForLocal just creates the command with the given entries, not using the segment-aware map
         Container container = ownerEntry.getValue();
         C toPrimary = helper.copyForLocal(command, container);
         toPrimary.setTopologyId(command.getTopologyId());
         CompletionStage<ValidResponse> rpcFuture = manyWriteOnRemotePrimary(owner, toPrimary, multiTargetCollector);
         int myOffset = offset;
         offset += helper.containerSize(container);
         rpcFuture.whenComplete((response, t) -> {
            if (t != null) {
               allFuture.completeExceptionally(t);
               return;
            }
            Object responseValue = response.getResponseValue();
            // Note: we could use PrimaryResponseHandler, but we would have to add the reference to allFuture, offset...
            InternalCacheValue[] values;
            try {
               if (command.loadType() == DONT_LOAD) {
                  if (!(responseValue instanceof InternalCacheValue[])) {
                     allFuture.completeExceptionally(new CacheException("Response from " + owner + ": expected InternalCacheValue[] but it is " + responseValue));
                     return;
                  }
                  values = (InternalCacheValue[]) responseValue;
               } else {
                  if (!(responseValue instanceof Object[]) || ((Object[]) responseValue).length != 2) {
                     allFuture.completeExceptionally(new CacheException("Response from " + owner + ": expected Object[2] but it is " + responseValue));
                     return;
                  }
                  // We use the Object[] { InternalCacheValue[], Object[] } structure to benefit from same-type array marshalling
                  // TODO: optimize by returning the entry itself
                  // Note: some interceptors relying on the return value *could* have a problem interpreting this
                  values = (InternalCacheValue[]) ((Object[]) responseValue)[0];
                  MergingCompletableFuture.moveListItemsToFuture(((Object[]) responseValue)[1], allFuture, myOffset);
               }
               AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
               synchronized (allFuture) {
                  if (allFuture.isDone()) {
                     return;
                  }
                  int i = 0;
                  for (Object key : helper.toKeys(container)) {
                     // we will serve as the backup
                     InternalCacheEntry ice = values[i++].toInternalCacheEntry(key);
                     entryFactory.wrapExternalEntry(ctx, key, ice, true, true);
                     RepeatableReadEntry entry = (RepeatableReadEntry) ctx.lookupEntry(key);
                     // we don't care about setCreated() since a backup owner should not fire listeners
                     entry.setChanged(true);
                     aggregateCompletionStage.dependsOn(commitSingleEntryIfNewer(entry, ctx, command));
                     if (entry.isCommitted() && !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
                        scheduleKeyInvalidation(entry.getKey(), entry.getMetadata().version(), entry.isRemoved());
                     }
                  }
                  assert i == values.length;
               }
               aggregateCompletionStage.freeze().thenRun(allFuture::countDown);
            } catch (Throwable t2) {
               allFuture.completeExceptionally(t2);
            }
         });
      }
      return asyncValue(allFuture);
   }
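The myOffset/offset bookkeeping is what keeps the merged results aligned: each container claims a contiguous slice of the shared results array, and moveListItemsToFuture copies the remote values into that slice. A toy model of the slicing, using plain arrays rather than Infinispan types:

import java.util.Arrays;

final class OffsetMerge {
   // Each participant's results land in a contiguous slice of the shared array.
   static void mergeAt(Object[] results, Object[] partial, int offset) {
      System.arraycopy(partial, 0, results, offset, partial.length);
   }

   public static void main(String[] args) {
      Object[] results = new Object[4];
      mergeAt(results, new Object[] { "a", "b" }, 0); // local slice first
      mergeAt(results, new Object[] { "c", "d" }, 2); // remote slice starts after it
      System.out.println(Arrays.toString(results));   // [a, b, c, d]
   }
}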
Use of org.infinispan.distribution.DistributionInfo in project infinispan by infinispan.
In the class BaseDistributionInterceptor, the method primaryReturnHandler:
   protected Object primaryReturnHandler(InvocationContext ctx, AbstractDataWriteCommand command, Object localResult) {
      if (!command.isSuccessful()) {
         if (log.isTraceEnabled())
            log.tracef("Skipping the replication of the conditional command as it did not succeed on primary owner (%s).", command);
         return localResult;
      }
      LocalizedCacheTopology cacheTopology = checkTopologyId(command);
      int segment = SegmentSpecificCommand.extractSegment(command, command.getKey(), keyPartitioner);
      DistributionInfo distributionInfo = cacheTopology.getSegmentDistribution(segment);
      Collection<Address> owners = distributionInfo.writeOwners();
      if (owners.size() == 1) {
         // There are no backups, skip the replication part.
         return localResult;
      }
      // Cache the matcher and reset it if we get an OutdatedTopologyException (or any other exception) from a backup
      ValueMatcher originalMatcher = command.getValueMatcher();
      // Ignore the previous value on the backup owners
      command.setValueMatcher(ValueMatcher.MATCH_ALWAYS);
      if (!isSynchronous(command)) {
         if (isReplicated) {
            rpcManager.sendToAll(command, DeliverOrder.PER_SENDER);
         } else {
            rpcManager.sendToMany(owners, command, DeliverOrder.PER_SENDER);
         }
         // Switch to the retry policy, in case the primary owner changes before we commit locally
         command.setValueMatcher(originalMatcher.matcherForRetry());
         return localResult;
      }
      VoidResponseCollector collector = VoidResponseCollector.ignoreLeavers();
      RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
      // Mark the command as a backup write so it can skip some checks
      command.addFlags(FlagBitSets.BACKUP_WRITE);
      CompletionStage<Void> remoteInvocation = isReplicated
            ? rpcManager.invokeCommandOnAll(command, collector, rpcOptions)
            : rpcManager.invokeCommand(owners, command, collector, rpcOptions);
      return asyncValue(remoteInvocation.handle((ignored, t) -> {
         // Unset the backup write bit as the command will be retried
         command.setFlagsBitSet(command.getFlagsBitSet() & ~FlagBitSets.BACKUP_WRITE);
         // Switch to the retry policy, in case the primary owner changed and the write already succeeded on the new primary
         command.setValueMatcher(originalMatcher.matcherForRetry());
         CompletableFutures.rethrowExceptionIfPresent(t);
         return localResult;
      }));
   }
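Note the early return when writeOwners() holds a single address: the primary is the only write owner, so there is nothing to replicate. A one-method sketch of that guard (the class and method names are illustrative):

import java.util.Collection;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.remoting.transport.Address;

final class BackupGuard {
   // True when the segment has at least one backup besides the primary,
   // i.e. when the committed write must be replicated.
   static boolean needsBackupReplication(DistributionInfo info) {
      Collection<Address> owners = info.writeOwners();
      return owners.size() > 1;
   }
}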