Use of org.infinispan.container.entries.RemoteMetadata in the Infinispan project:
class ScatteredStateConsumerImpl, method onTaskCompletion.
// Completion hook for one inbound key-transfer task. Updates the per-segment
// bookkeeping under transferMapsLock; once every task for a segment has finished,
// it requests the actual values for that segment's keys (first from the data
// container, then from the persistent store). When the very last inbound segment
// completes, the queued retrieve/backup/invalidate batches are flushed and the
// end of state transfer may be signalled.
// NOTE(review): assumes transferMapsLock guards transfersBySegment and
// inboundSegments, as in the base StateConsumerImpl — confirm against superclass.
@Override
protected void onTaskCompletion(InboundTransferTask inboundTransfer) {
// a bit of overkill since we start these tasks for single segment
IntSet completedSegments = IntSets.immutableEmptySet();
if (log.isTraceEnabled())
log.tracef("Inbound transfer finished %s: %s", inboundTransfer, inboundTransfer.isCompletedSuccessfully() ? "successfully" : "unsuccessfuly");
synchronized (transferMapsLock) {
// transferMapsLock is held when all the tasks are added so we see that all of them are done
for (PrimitiveIterator.OfInt iter = inboundTransfer.getSegments().iterator(); iter.hasNext(); ) {
int segment = iter.nextInt();
List<InboundTransferTask> transfers = transfersBySegment.get(segment);
if (transfers == null) {
// It is possible that two task complete concurrently, one of them checks is all tasks
// for given segments have been completed successfully and (finding out that it's true)
// removes the transfer for given segment. The second task arrives and finds out that
// its record int transfersBySegment is gone, but that's OK, as the segment has been handled.
log.tracef("Transfers for segment %d have not been found.", segment);
} else {
// We are removing here rather than in removeTransfer, because we need to know if we're the last
// finishing task.
transfers.remove(inboundTransfer);
if (transfers.isEmpty()) {
transfersBySegment.remove(segment);
if (log.isTraceEnabled()) {
log.tracef("All transfer tasks for segment %d have completed.", segment);
}
svm.notifyKeyTransferFinished(segment, inboundTransfer.isCompletedSuccessfully(), inboundTransfer.isCancelled());
// Grow completedSegments lazily (empty -> immutable singleton -> mutable set)
// to avoid allocating a mutable IntSet in the common single-segment case.
switch(completedSegments.size()) {
case 0:
completedSegments = IntSets.immutableSet(segment);
break;
case 1:
completedSegments = IntSets.mutableCopyFrom(completedSegments);
// Intentional falls through
default:
completedSegments.set(segment);
}
}
}
}
}
// Outside the lock: for the freshly completed segments, pull in the actual values.
if (completedSegments.isEmpty()) {
log.tracef("Not requesting any values yet because no segments have been completed.");
} else if (inboundTransfer.isCompletedSuccessfully()) {
log.tracef("Requesting values from segments %s, for in-memory keys", completedSegments);
// Entries carrying RemoteMetadata point at a node that holds the value: fetch it.
// Other entries are held locally: back them up and invalidate the remaining members.
dataContainer.forEach(completedSegments, ice -> {
// TODO: could the version be null in here?
if (ice.getMetadata() instanceof RemoteMetadata) {
Address backup = ((RemoteMetadata) ice.getMetadata()).getAddress();
retrieveEntry(ice.getKey(), backup);
for (Address member : cacheTopology.getActualMembers()) {
if (!member.equals(backup)) {
invalidate(ice.getKey(), ice.getMetadata().version(), member);
}
}
} else {
backupEntry(ice);
for (Address member : nonBackupAddresses) {
invalidate(ice.getKey(), ice.getMetadata().version(), member);
}
}
});
// With passivation, some key could be activated here and we could miss it,
// but then it should be broadcast-loaded in PrefetchInvalidationInterceptor
// Only keys absent from the data container are loaded from the store
// (the in-memory pass above already handled the rest).
Publisher<MarshallableEntry<Object, Object>> persistencePublisher = persistenceManager.publishEntries(completedSegments, k -> dataContainer.peek(k) == null, true, true, Configurations::isStateTransferStore);
try {
blockingSubscribe(Flowable.fromPublisher(persistencePublisher).doOnNext(me -> {
try {
Metadata metadata = me.getMetadata();
if (metadata instanceof RemoteMetadata) {
Address backup = ((RemoteMetadata) metadata).getAddress();
retrieveEntry(me.getKey(), backup);
for (Address member : cacheTopology.getActualMembers()) {
if (!member.equals(backup)) {
invalidate(me.getKey(), metadata.version(), member);
}
}
} else {
backupEntry(entryFactory.create(me.getKey(), me.getValue(), me.getMetadata()));
for (Address member : nonBackupAddresses) {
invalidate(me.getKey(), metadata.version(), member);
}
}
} catch (CacheException e) {
// Best-effort per entry: log and keep processing the rest of the store.
log.failedLoadingValueFromCacheStore(me.getKey(), e);
}
}));
} catch (CacheException e) {
PERSISTENCE.failedLoadingKeysFromCacheStore(e);
}
}
// Determine (under the lock) whether this was the last unfinished inbound segment.
boolean lastTransfer = false;
synchronized (transferMapsLock) {
inboundSegments.removeAll(completedSegments);
log.tracef("Unfinished inbound segments: " + inboundSegments);
if (inboundSegments.isEmpty()) {
lastTransfer = true;
}
}
if (lastTransfer) {
// Flush the batched value requests, grouped by the node holding the values.
for (Map.Entry<Address, BlockingQueue<Object>> pair : retrievedEntries.entrySet()) {
BlockingQueue<Object> queue = pair.getValue();
List<Object> keys = new ArrayList<>(queue.size());
queue.drainTo(keys);
if (!keys.isEmpty()) {
getValuesAndApply(pair.getKey(), keys);
}
}
// Flush the batched backup entries in a single call.
List<InternalCacheEntry<?, ?>> entries = new ArrayList<>(backupQueue.size());
backupQueue.drainTo(entries);
if (!entries.isEmpty()) {
backupEntries(entries);
}
// Flush the batched invalidations, grouped by target node.
for (Map.Entry<Address, BlockingQueue<KeyAndVersion>> pair : invalidations.entrySet()) {
BlockingQueue<KeyAndVersion> queue = pair.getValue();
List<KeyAndVersion> list = new ArrayList<>(queue.size());
queue.drainTo(list);
if (!list.isEmpty()) {
invalidate(list, pair.getKey());
}
}
}
// we must not remove the transfer before the requests for values are sent
// as we could notify the end of rebalance too soon
removeTransfer(inboundTransfer);
if (lastTransfer) {
if (log.isTraceEnabled())
log.tracef("Inbound transfer removed, chunk counter is %s", chunkCounter.get());
if (chunkCounter.get() == 0) {
// No values to transfer after all the keys were received, we can end state transfer immediately
notifyEndOfStateTransferIfNeeded();
}
}
}
Use of org.infinispan.container.entries.RemoteMetadata in the Infinispan project:
class ScatteredDistributionInterceptor, method updateEntryIfNoChange.
/**
 * Commits {@code entry} into the data container only if the container still holds
 * the same version this command observed when it originally read the entry (the
 * "seen" version). Any mismatch means another write raced us, and a
 * {@code ConcurrentChangeException} is thrown so the command can be retried.
 *
 * @param entry the context entry carrying both the new value/metadata and the
 *              old (seen) value/metadata from the read
 * @return {@code true} if the entry was committed into the container
 */
private boolean updateEntryIfNoChange(RepeatableReadEntry entry) {
// We cannot delegate the dataContainer.compute() to entry.commit() as we need to reliably
// retrieve previous value and metadata, but the entry API does not provide these.
dataContainer.compute(entry.getKey(), (key, oldEntry, factory) -> {
// newMetadata is null in case of local-mode write on non-primary owners
Metadata newMetadata = entry.getMetadata();
if (oldEntry == null) {
// Container has no entry: we must not have seen one either, or someone removed it.
if (entry.getOldValue() != null) {
if (log.isTraceEnabled()) {
log.trace("Non-null value in context, not committing");
}
throw new ConcurrentChangeException();
}
if (entry.getValue() == null && newMetadata == null) {
if (log.isTraceEnabled()) {
log.trace("No previous record and this is a removal, not committing anything.");
}
return null;
} else {
if (log.isTraceEnabled()) {
log.trace("Committing new entry " + entry);
}
entry.setCommitted();
return factory.create(entry);
}
}
// Compare the version currently in the container against the version we read.
Metadata oldMetadata = oldEntry.getMetadata();
EntryVersion oldVersion = oldMetadata == null ? null : oldMetadata.version();
Metadata seenMetadata = entry.getOldMetadata();
EntryVersion seenVersion = seenMetadata == null ? null : seenMetadata.version();
if (oldVersion == null) {
if (seenVersion != null) {
if (log.isTraceEnabled()) {
log.tracef("Current version is null but seen version is %s, throwing", seenVersion);
}
throw new ConcurrentChangeException();
}
} else if (seenVersion == null) {
// Tolerated mismatch: the entry may have expired between our read and this commit.
if (oldEntry.canExpire() && oldEntry.isExpired(timeService.wallClockTime())) {
if (log.isTraceEnabled()) {
log.trace("Current entry is expired and therefore we haven't seen it");
}
} else {
if (log.isTraceEnabled()) {
log.tracef("Current version is %s but seen version is null, throwing", oldVersion);
}
throw new ConcurrentChangeException();
}
} else if (seenVersion.compareTo(oldVersion) != InequalVersionComparisonResult.EQUAL) {
if (log.isTraceEnabled()) {
log.tracef("Current version is %s but seen version is %s, throwing", oldVersion, seenVersion);
}
throw new ConcurrentChangeException();
}
// No concurrent change detected. Commit when: the old entry carries no version,
// the update carries no version info, the new version is strictly newer, or the
// old entry is only a RemoteMetadata placeholder with an equal version.
// (Short-circuiting guarantees oldMetadata is non-null when its version is read.)
InequalVersionComparisonResult comparisonResult;
if (oldVersion == null || newMetadata == null || newMetadata.version() == null || (comparisonResult = oldMetadata.version().compareTo(newMetadata.version())) == InequalVersionComparisonResult.BEFORE || (oldMetadata instanceof RemoteMetadata && comparisonResult == InequalVersionComparisonResult.EQUAL)) {
if (log.isTraceEnabled()) {
log.tracef("Committing entry %s, replaced %s", entry, oldEntry);
}
entry.setCommitted();
if (entry.getValue() != null || newMetadata != null) {
return factory.create(entry);
} else {
return null;
}
} else {
if (log.isTraceEnabled()) {
log.tracef("Not committing %s, current entry is %s", entry, oldEntry);
}
return oldEntry;
}
});
return entry.isCommitted();
}
Use of org.infinispan.container.entries.RemoteMetadata in the Infinispan project:
class ScatteredDistributionInterceptor, method updateEntryIfNewer.
/**
 * Commits {@code entry} into the data container only if its version is newer than
 * the version currently stored (or if either side carries no version to compare).
 * Unlike {@code updateEntryIfNoChange}, this performs no seen-version conflict
 * check, so it never throws {@code ConcurrentChangeException}.
 *
 * @param entry the context entry carrying the new value and metadata
 * @return {@code true} if the entry was committed into the container
 */
private boolean updateEntryIfNewer(RepeatableReadEntry entry) {
// We cannot delegate the dataContainer.compute() to entry.commit() as we need to reliably
// retrieve previous value and metadata, but the entry API does not provide these.
dataContainer.compute(entry.getKey(), (key, oldEntry, factory) -> {
// newMetadata is null in case of local-mode write
Metadata newMetadata = entry.getMetadata();
if (oldEntry == null) {
if (entry.getValue() == null && newMetadata == null) {
if (log.isTraceEnabled()) {
log.trace("No previous record and this is a removal, not committing anything.");
}
return null;
} else {
if (log.isTraceEnabled()) {
log.trace("Committing new entry " + entry);
}
entry.setCommitted();
return factory.create(entry);
}
}
// Commit when: either side lacks a comparable version, the new version is strictly
// newer, or the old entry is only a RemoteMetadata placeholder with an equal version.
// (Short-circuiting guarantees oldMetadata.version() is only read when non-null.)
Metadata oldMetadata = oldEntry.getMetadata();
InequalVersionComparisonResult comparisonResult;
if (oldMetadata == null || oldMetadata.version() == null || newMetadata == null || newMetadata.version() == null || (comparisonResult = oldMetadata.version().compareTo(newMetadata.version())) == InequalVersionComparisonResult.BEFORE || (oldMetadata instanceof RemoteMetadata && comparisonResult == InequalVersionComparisonResult.EQUAL)) {
if (log.isTraceEnabled()) {
log.tracef("Committing entry %s, replaced %s", entry, oldEntry);
}
entry.setCommitted();
if (entry.getValue() != null || newMetadata != null) {
return factory.create(entry);
} else {
return null;
}
} else {
if (log.isTraceEnabled()) {
log.tracef("Not committing %s, current entry is %s", entry, oldEntry);
}
return oldEntry;
}
});
return entry.isCommitted();
}
Use of org.infinispan.container.entries.RemoteMetadata in the Infinispan project:
class AbstractAnchoredKeysTest, method assertLocation.
/**
 * Asserts the placement of {@code key} across all caches in the cluster: the node at
 * {@code expectedAddress} must hold {@code expectedValue} locally (and no
 * {@code RemoteMetadata} pointer), while every other node must hold a value-less
 * entry whose {@code RemoteMetadata} points at {@code expectedAddress}.
 *
 * <p>Fix: the non-owner branch previously dereferenced {@code entry} and cast its
 * metadata without the null/type checks the owner branch performs, so a missing or
 * non-remote entry failed with NPE/ClassCastException instead of a diagnostic
 * assertion message.
 *
 * @param key             the user-level key (converted to storage format per cache)
 * @param expectedAddress the node expected to own the actual value
 * @param expectedValue   the value expected on the owning node
 */
protected void assertLocation(Object key, Address expectedAddress, Object expectedValue) {
   for (Cache<Object, Object> cache : caches()) {
      Address address = cache.getAdvancedCache().getRpcManager().getAddress();
      Object storedKey = cache.getAdvancedCache().getKeyDataConversion().toStorage(key);
      InternalCacheEntry<Object, Object> entry = cache.getAdvancedCache().getDataContainer().peek(storedKey);
      if (address.equals(expectedAddress)) {
         Object storedValue = entry != null ? entry.getValue() : null;
         Object value = cache.getAdvancedCache().getValueDataConversion().fromStorage(storedValue);
         assertEquals("Wrong value for " + key + " on " + address, expectedValue, value);
         Metadata metadata = entry != null ? entry.getMetadata() : null;
         assertFalse("No location expected for " + key + " on " + address + ", got " + metadata, metadata instanceof RemoteMetadata);
      } else {
         // Fail with a clear message instead of an NPE when the location entry is missing.
         if (entry == null) {
            throw new AssertionError("Missing location entry for " + key + " on " + address);
         }
         assertNull("No value expected for key " + key + " on " + address, entry.getValue());
         // Fail with a clear message instead of a ClassCastException on unexpected metadata.
         Metadata metadata = entry.getMetadata();
         if (!(metadata instanceof RemoteMetadata)) {
            throw new AssertionError("Expected RemoteMetadata for " + key + " on " + address + ", got " + metadata);
         }
         Address location = ((RemoteMetadata) metadata).getAddress();
         assertEquals("Wrong location for " + key + " on " + address, expectedAddress, location);
      }
   }
}
Use of org.infinispan.container.entries.RemoteMetadata in the Infinispan project:
class PrefetchInterceptor, method retrieveRemoteValues.
// Broadcasts a ClusteredGetAllCommand for the given keys, selects the
// highest-versioned value returned for each key, wraps the winners into the
// invocation context and commits them locally via a PutMapCommand so the
// original command can proceed without further remote fetches.
// TODO: this is not completely aligned with single-entry prefetch
private <C extends VisitableCommand & TopologyAffectedCommand> InvocationStage retrieveRemoteValues(InvocationContext ctx, C originCommand, List<?> keys) {
if (log.isTraceEnabled()) {
log.tracef("Prefetching entries for keys %s using broadcast", keys);
}
ClusteredGetAllCommand<?, ?> command = commandsFactory.buildClusteredGetAllCommand(keys, FlagBitSets.SKIP_OWNERSHIP_CHECK, null);
command.setTopologyId(originCommand.getTopologyId());
CompletionStage<Map<Address, Response>> rpcFuture = rpcManager.invokeCommandOnAll(command, MapResponseCollector.ignoreLeavers(), rpcManager.getSyncRpcOptions());
return asyncValue(rpcFuture).thenApplyMakeStage(ctx, originCommand, (rCtx, topologyAffectedCommand, rv) -> {
Map<Address, Response> responseMap = (Map<Address, Response>) rv;
// maxValues[i] accumulates the highest-versioned value seen for keys.get(i).
InternalCacheValue<V>[] maxValues = new InternalCacheValue[keys.size()];
for (Response response : responseMap.values()) {
// Any unsuccessful response means the topology changed under us: retry.
if (!response.isSuccessful()) {
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
InternalCacheValue<V>[] values = ((SuccessfulResponse<InternalCacheValue<V>[]>) response).getResponseValue();
int i = 0;
for (InternalCacheValue<V> icv : values) {
if (icv != null) {
Metadata metadata = icv.getMetadata();
if (metadata instanceof RemoteMetadata) {
// not sure if this can happen, but let's be on the safe side
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
// Keep the value with the highest version; an unversioned candidate
// only wins when no value has been recorded for this key yet.
if (maxValues[i] == null) {
maxValues[i] = icv;
} else if (metadata != null && metadata.version() != null) {
Metadata maxMetadata;
if ((maxMetadata = maxValues[i].getMetadata()) == null || maxMetadata.version() == null || maxMetadata.version().compareTo(metadata.version()) == InequalVersionComparisonResult.BEFORE) {
maxValues[i] = icv;
}
}
}
++i;
}
}
Map<Object, InternalCacheValue<V>> map = new HashMap<>(keys.size());
for (int i = 0; i < maxValues.length; ++i) {
if (maxValues[i] != null) {
map.put(keys.get(i), maxValues[i]);
}
}
if (log.isTraceEnabled()) {
log.tracef("Prefetched values are %s", map);
}
if (map.isEmpty()) {
return CompletableFutures.completedNull();
}
// Wrap the fetched entries into the context so the PutMapCommand below can
// commit them, keeping the values seen by the main command consistent.
for (Map.Entry<Object, InternalCacheValue<V>> entry : map.entrySet()) {
entryFactory.wrapExternalEntry(rCtx, entry.getKey(), entry.getValue().toInternalCacheEntry(entry.getKey()), true, true);
}
PutMapCommand putMapCommand = commandsFactory.buildPutMapCommand(map, null, STATE_TRANSFER_FLAGS);
putMapCommand.setTopologyId(topologyAffectedCommand.getTopologyId());
return invokeNext(rCtx, putMapCommand);
});
}
Aggregations