Use of org.infinispan.container.entries.NullCacheEntry in project infinispan by infinispan.
Class ScatteredDistributionInterceptor, method visitReadOnlyKeyCommand.
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) throws Throwable {
   Object key = command.getKey();
   CacheEntry entry = ctx.lookupEntry(key);
   if (entry != null) {
      // the entry is owned locally (it is NullCacheEntry if it was not found), no need to go remote
      return invokeNext(ctx, command);
   }
   if (!ctx.isOriginLocal()) {
      return UnsureResponse.INSTANCE;
   }
   if (isLocalModeForced(command) || command.hasAnyFlag(FlagBitSets.SKIP_REMOTE_LOOKUP)) {
      if (ctx.lookupEntry(command.getKey()) == null) {
         entryFactory.wrapExternalEntry(ctx, command.getKey(), NullCacheEntry.getInstance(), false, false);
      }
      return invokeNext(ctx, command);
   }
   DistributionInfo info = checkTopology(command).getDistribution(command.getKey());
   if (info.primary() == null) {
      throw AllOwnersLostException.INSTANCE;
   }
   ResponseCollector<Response> collector = PassthroughSingleResponseCollector.INSTANCE;
   CompletionStage<Response> rpc = rpcManager.invokeCommand(info.primary(), command, collector, rpcManager.getSyncRpcOptions());
   return asyncValue(rpc.thenApply(response -> {
      if (response.isSuccessful()) {
         return ((SuccessfulResponse) response).getResponseValue();
      } else if (response instanceof UnsureResponse) {
         throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
      } else if (response instanceof CacheNotFoundResponse) {
         throw AllOwnersLostException.INSTANCE;
      } else if (response instanceof ExceptionResponse) {
         throw ResponseCollectors.wrapRemoteException(info.primary(), ((ExceptionResponse) response).getException());
      } else {
         throw new IllegalArgumentException("Unexpected response " + response);
      }
   }));
}
Use of org.infinispan.container.entries.NullCacheEntry in project infinispan by infinispan.
Class BaseDistributionInterceptor, method visitReadOnlyKeyCommand.
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) throws Throwable {
   // TODO: repeatable reads are not implemented; these need to keep the read values on the remote side for the
   // duration of the transaction, and that requires synchronous invocation of the read-only command on all owners.
   // For better consistency, use versioning and a write skew check that will fail the transaction when we apply
   // the function on a different version of the entry than the one previously read.
   Object key = command.getKey();
   CacheEntry entry = ctx.lookupEntry(key);
   if (entry != null) {
      if (ctx.isOriginLocal()) {
         // the entry is owned locally (it is NullCacheEntry if it was not found), no need to go remote
         return invokeNext(ctx, command);
      } else {
         return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> wrapFunctionalResultOnNonOriginOnReturn(rv, entry));
      }
   }
   if (!ctx.isOriginLocal()) {
      return UnsureResponse.INSTANCE;
   }
   if (readNeedsRemoteValue(command)) {
      LocalizedCacheTopology cacheTopology = checkTopologyId(command);
      Collection<Address> owners = cacheTopology.getDistribution(key).readOwners();
      if (log.isTraceEnabled())
         log.tracef("Doing a remote get for key %s in topology %d to %s", key, cacheTopology.getTopologyId(), owners);
      ReadOnlyKeyCommand remoteCommand = remoteReadOnlyCommand(ctx, command);
      // make sure that the command topology is set to the value according to which we route it
      remoteCommand.setTopologyId(cacheTopology.getTopologyId());
      CompletionStage<SuccessfulResponse> rpc = rpcManager.invokeCommandStaggered(owners, remoteCommand, new RemoteGetSingleKeyCollector(), rpcManager.getSyncRpcOptions());
      return asyncValue(rpc).thenApply(ctx, command, (rCtx, rCommand, response) -> {
         Object responseValue = ((SuccessfulResponse) response).getResponseValue();
         return unwrapFunctionalResultOnOrigin(rCtx, rCommand.getKey(), responseValue);
      });
   } else {
      // This command has LOCAL flags, so just wrap a NullCacheEntry and let the command run
      entryFactory.wrapExternalEntry(ctx, key, NullCacheEntry.getInstance(), true, false);
      return invokeNext(ctx, command);
   }
}
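Note that invokeCommandStaggered does not broadcast to all read owners at once: it queries one owner and only falls through to the next one when the previous is slow or fails, trading a little latency for far less redundant traffic. A plain-CompletableFuture sketch of that staggering pattern (invokeStaggered, queryOwner, and the delay value are illustrative assumptions, not Infinispan's API):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

// Sketch of a staggered fan-out: query owners one at a time, moving to the
// next owner after a short delay (slow owner) or immediately (failed owner).
// The first successful response wins; error aggregation when every owner
// fails is omitted for brevity.
class StaggeredGet {
   private static final long STAGGER_DELAY_MS = 50; // illustrative value

   static <A, R> CompletableFuture<R> invokeStaggered(List<A> owners,
         Function<A, CompletableFuture<R>> queryOwner,
         ScheduledExecutorService scheduler) {
      CompletableFuture<R> result = new CompletableFuture<>();
      sendNext(owners, 0, queryOwner, scheduler, result);
      return result;
   }

   private static <A, R> void sendNext(List<A> owners, int index,
         Function<A, CompletableFuture<R>> queryOwner,
         ScheduledExecutorService scheduler, CompletableFuture<R> result) {
      if (index >= owners.size() || result.isDone()) {
         return;
      }
      queryOwner.apply(owners.get(index)).whenComplete((value, throwable) -> {
         if (throwable == null) {
            result.complete(value); // first success wins; later completes are no-ops
         } else {
            sendNext(owners, index + 1, queryOwner, scheduler, result); // fail fast
         }
      });
      // Also move on if this owner is merely slow to respond.
      scheduler.schedule(() -> sendNext(owners, index + 1, queryOwner, scheduler, result),
            STAGGER_DELAY_MS, TimeUnit.MILLISECONDS);
   }
}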
Use of org.infinispan.container.entries.NullCacheEntry in project infinispan by infinispan.
Class DefaultConflictManager, method doResolveConflicts.
private void doResolveConflicts(final LocalizedCacheTopology topology, final EntryMergePolicy<K, V> mergePolicy, final Set<Address> preferredNodes) {
   boolean userCall = preferredNodes == null;
   final Set<Address> preferredPartition = userCall ? new HashSet<>(topology.getCurrentCH().getMembers()) : preferredNodes;
   if (log.isTraceEnabled())
      log.tracef("Cache %s attempting to resolve conflicts. All Members %s, Installed topology %s, Preferred Partition %s", cacheName, topology.getMembers(), topology, preferredPartition);
   final Phaser phaser = new Phaser(1);
   getConflicts(topology).forEach(conflictMap -> {
      phaser.register();
      if (log.isTraceEnabled())
         log.tracef("Cache %s conflict detected %s", cacheName, conflictMap);
      Collection<CacheEntry<K, V>> entries = conflictMap.values();
      Optional<K> optionalEntry = entries.stream().filter(entry -> !(entry instanceof NullCacheEntry)).map(CacheEntry::getKey).findAny();
      final K key = optionalEntry.orElseThrow(() -> new CacheException("All returned conflicts are NullCacheEntries. This should not happen!"));
      Address primaryReplica = topology.getDistribution(key).primary();
      List<Address> preferredEntries = conflictMap.entrySet().stream().map(Map.Entry::getKey).filter(preferredPartition::contains).collect(Collectors.toList());
      // If only one entry exists in the preferred partition, then use that entry
      CacheEntry<K, V> preferredEntry;
      if (preferredEntries.size() == 1) {
         preferredEntry = conflictMap.remove(preferredEntries.get(0));
      } else {
         // If multiple conflicts exist in the preferred partition, then use the primary replica from the preferred partition
         // If not a merge, then also use the primary as the preferred entry
         // Preferred is null if no entry exists in the preferred partition
         preferredEntry = conflictMap.remove(primaryReplica);
      }
      if (log.isTraceEnabled())
         log.tracef("Cache %s applying EntryMergePolicy %s to PreferredEntry %s, otherEntries %s", cacheName, mergePolicy.getClass().getName(), preferredEntry, entries);
      CacheEntry<K, V> entry = preferredEntry instanceof NullCacheEntry ? null : preferredEntry;
      List<CacheEntry<K, V>> otherEntries = entries.stream().filter(e -> !(e instanceof NullCacheEntry)).collect(Collectors.toList());
      CacheEntry<K, V> mergedEntry = mergePolicy.merge(entry, otherEntries);
      CompletableFuture<V> future = applyMergeResult(userCall, key, mergedEntry);
      future.whenComplete((responseMap, exception) -> {
         if (log.isTraceEnabled())
            log.tracef("Cache %s resolveConflicts future complete for key %s: ResponseMap=%s", cacheName, key, responseMap);
         phaser.arriveAndDeregister();
         if (exception != null)
            log.exceptionDuringConflictResolution(key, exception);
      });
   });
   phaser.arriveAndAwaitAdvance();
   if (log.isTraceEnabled())
      log.tracef("Cache %s finished resolving conflicts for topologyId=%s", cacheName, topology.getTopologyId());
}
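The merge decision itself is delegated to the supplied EntryMergePolicy: the code above passes null as the preferred entry when the preferred partition only held a NullCacheEntry, and strips NullCacheEntries out of the other entries. A minimal custom policy against the org.infinispan.conflict.EntryMergePolicy interface, similar in spirit to the built-in PREFERRED_NON_NULL behavior (a sketch, not the project's implementation):

import java.util.List;

import org.infinispan.conflict.EntryMergePolicy;
import org.infinispan.container.entries.CacheEntry;

// Sketch of a custom merge policy: keep the preferred-partition entry when it
// exists, otherwise fall back to any surviving replica; returning null tells
// the conflict manager to remove the key everywhere.
public class PreferNonNullMergePolicy<K, V> implements EntryMergePolicy<K, V> {
   @Override
   public CacheEntry<K, V> merge(CacheEntry<K, V> preferredEntry, List<CacheEntry<K, V>> otherEntries) {
      if (preferredEntry != null) {
         return preferredEntry;
      }
      // preferredEntry arrives as null when doResolveConflicts saw a
      // NullCacheEntry in the preferred partition (see above).
      return otherEntries.isEmpty() ? null : otherEntries.get(0);
   }
}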