Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The following example is from the class GridCachePartitionExchangeManager, method mergeExchangesOnCoordinator.
/**
* @param curFut Current active exchange future.
* @return {@code false} if it is necessary to wait for messages from merged exchanges.
*/
public boolean mergeExchangesOnCoordinator(GridDhtPartitionsExchangeFuture curFut) {
if (IGNITE_EXCHANGE_MERGE_DELAY > 0) {
try {
U.sleep(IGNITE_EXCHANGE_MERGE_DELAY);
} catch (IgniteInterruptedCheckedException e) {
U.warn(log, "Failed to wait for exchange merge, thread interrupted: " + e);
return true;
}
}
AffinityTopologyVersion exchMergeTestWaitVer = this.exchMergeTestWaitVer;
if (exchMergeTestWaitVer != null) {
if (log.isInfoEnabled()) {
log.info("Exchange merge test, waiting for version [exch=" + curFut.initialVersion() + ", waitVer=" + exchMergeTestWaitVer + ']');
}
long end = U.currentTimeMillis() + 10_000;
while (U.currentTimeMillis() < end) {
boolean found = false;
for (CachePartitionExchangeWorkerTask task : exchWorker.futQ) {
if (task instanceof GridDhtPartitionsExchangeFuture) {
GridDhtPartitionsExchangeFuture fut = (GridDhtPartitionsExchangeFuture) task;
if (exchMergeTestWaitVer.equals(fut.initialVersion())) {
if (log.isInfoEnabled())
log.info("Exchange merge test, found awaited version: " + exchMergeTestWaitVer);
found = true;
break;
}
}
}
if (found)
break;
else {
try {
U.sleep(100);
} catch (IgniteInterruptedCheckedException e) {
break;
}
}
}
this.exchMergeTestWaitVer = null;
}
synchronized (curFut.mutex()) {
int awaited = 0;
for (CachePartitionExchangeWorkerTask task : exchWorker.futQ) {
if (task instanceof GridDhtPartitionsExchangeFuture) {
GridDhtPartitionsExchangeFuture fut = (GridDhtPartitionsExchangeFuture) task;
DiscoveryEvent evt = fut.firstEvent();
if (evt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
if (log.isInfoEnabled())
log.info("Stop merge, custom event found: " + evt);
break;
}
ClusterNode node = evt.eventNode();
if (!curFut.context().supportsMergeExchanges(node)) {
if (log.isInfoEnabled())
log.info("Stop merge, node does not support merge: " + node);
break;
}
if (evt.type() == EVT_NODE_JOINED && cctx.cache().hasCachesReceivedFromJoin(node)) {
if (log.isInfoEnabled())
log.info("Stop merge, received caches from node: " + node);
break;
}
if (log.isInfoEnabled()) {
log.info("Merge exchange future [curFut=" + curFut.initialVersion() + ", mergedFut=" + fut.initialVersion() + ", evt=" + IgniteUtils.gridEventName(fut.firstEvent().type()) + ", evtNode=" + fut.firstEvent().eventNode().id() + ", evtNodeClient=" + CU.clientNode(fut.firstEvent().eventNode()) + ']');
}
curFut.context().events().addEvent(fut.initialVersion(), fut.firstEvent(), fut.firstEventCache());
if (evt.type() == EVT_NODE_JOINED) {
if (fut.mergeJoinExchange(curFut))
awaited++;
}
} else {
if (!task.skipForExchangeMerge()) {
if (log.isInfoEnabled())
log.info("Stop merge, custom task found: " + task);
break;
}
}
}
return awaited == 0;
}
}
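
The merge loop above matches queued exchange futures against an awaited AffinityTopologyVersion, which works because exchange versions are totally ordered. Below is a minimal sketch of that ordering, assuming only ignite-core on the classpath; the class name and version values are invented for illustration, and the assertions need the -ea JVM flag to fire.

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;

public class TopVerOrderingSketch {
    public static void main(String[] args) {
        // A major version bump (5) typically comes from a node join or leave; the minor
        // version is bumped by events that keep the node set, such as a cache start.
        AffinityTopologyVersion joinVer = new AffinityTopologyVersion(5, 0);
        AffinityTopologyVersion minorVer = new AffinityTopologyVersion(5, 1);

        // The major topology version is compared first, then the minor version.
        assert joinVer.compareTo(minorVer) < 0;
        assert joinVer.topologyVersion() == minorVer.topologyVersion();
        assert minorVer.minorTopologyVersion() == 1;

        System.out.println("joinVer=" + joinVer + ", minorVer=" + minorVer);
    }
}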
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The following example is from the class GridCacheAdapter, method getAllAsync0.
/**
* @param keys Keys.
* @param readerArgs Reader arguments; a near cache reader will be added if not {@code null}.
* @param readThrough Read-through flag.
* @param checkTx Check local transaction flag.
* @param subjId Subject ID.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param expiry Expiry policy.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects flag.
* @param recovery Recovery flag.
* @param needVer If {@code true}, returns values as tuples containing value and version.
* @return Future.
*/
protected final <K1, V1> IgniteInternalFuture<Map<K1, V1>> getAllAsync0(@Nullable final Collection<KeyCacheObject> keys, @Nullable final ReaderArguments readerArgs, final boolean readThrough, boolean checkTx, @Nullable final UUID subjId, final String taskName, final boolean deserializeBinary, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean skipVals, final boolean keepCacheObjects, final boolean recovery, final boolean needVer) {
if (F.isEmpty(keys))
return new GridFinishedFuture<>(Collections.<K1, V1>emptyMap());
GridNearTxLocal tx = null;
if (checkTx) {
try {
checkJta();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
tx = ctx.tm().threadLocalTx(ctx);
}
if (tx == null || tx.implicit()) {
Map<KeyCacheObject, EntryGetResult> misses = null;
Set<GridCacheEntryEx> newLocalEntries = null;
final AffinityTopologyVersion topVer = tx == null ? ctx.affinity().affinityTopologyVersion() : tx.topologyVersion();
try {
int keysSize = keys.size();
GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture();
Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null;
if (ex != null)
return new GridFinishedFuture<>(ex);
final Map<K1, V1> map = keysSize == 1 ? (Map<K1, V1>) new IgniteBiTuple<>() : U.<K1, V1>newHashMap(keysSize);
final boolean storeEnabled = !skipVals && readThrough && ctx.readThrough();
boolean readNoEntry = ctx.readNoEntry(expiry, readerArgs != null);
for (KeyCacheObject key : keys) {
while (true) {
try {
EntryGetResult res = null;
boolean evt = !skipVals;
boolean updateMetrics = !skipVals;
GridCacheEntryEx entry = null;
boolean skipEntry = readNoEntry;
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime != 0) {
if (expireTime > U.currentTimeMillis()) {
res = new EntryGetWithTtlResult(row.value(), row.version(), false, expireTime, 0);
} else
skipEntry = false;
} else
res = new EntryGetResult(row.value(), row.version(), false);
}
if (res != null) {
if (evt) {
ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
}
if (updateMetrics && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(true);
} else if (storeEnabled)
skipEntry = false;
}
if (!skipEntry) {
boolean isNewLocalEntry = this.map.getEntry(ctx, key) == null;
entry = entryEx(key);
if (entry == null) {
if (!skipVals && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(false);
break;
}
if (isNewLocalEntry) {
if (newLocalEntries == null)
newLocalEntries = new HashSet<>();
newLocalEntries.add(entry);
}
if (storeEnabled) {
res = entry.innerGetAndReserveForLoad(updateMetrics, evt, subjId, taskName, expiry, !deserializeBinary, readerArgs);
assert res != null;
if (res.value() == null) {
if (misses == null)
misses = new HashMap<>();
misses.put(key, res);
res = null;
}
} else {
res = entry.innerGetVersioned(null, null, updateMetrics, evt, subjId, null, taskName, expiry, !deserializeBinary, readerArgs);
if (res == null)
ctx.evicts().touch(entry, topVer);
}
}
if (res != null) {
ctx.addResult(map, key, res, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED)))
ctx.evicts().touch(entry, topVer);
if (keysSize == 1)
// Safe to return because no locks are required in READ_COMMITTED mode.
return new GridFinishedFuture<>(map);
}
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry in getAllAsync(..) method (will retry): " + key);
}
}
}
if (storeEnabled && misses != null) {
final Map<KeyCacheObject, EntryGetResult> loadKeys = misses;
final IgniteTxLocalAdapter tx0 = tx;
final Collection<KeyCacheObject> loaded = new HashSet<>();
return new GridEmbeddedFuture(ctx.closures().callLocalSafe(ctx.projectSafe(new GPC<Map<K1, V1>>() {
@Override
public Map<K1, V1> call() throws Exception {
ctx.store().loadAll(null/*tx*/, loadKeys.keySet(), new CI2<KeyCacheObject, Object>() {
@Override
public void apply(KeyCacheObject key, Object val) {
EntryGetResult res = loadKeys.get(key);
if (res == null || val == null)
return;
loaded.add(key);
CacheObject cacheVal = ctx.toCacheObject(val);
while (true) {
GridCacheEntryEx entry = null;
try {
ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
} catch (IgniteCheckedException e) {
// Wrap errors (will be unwrapped).
throw new GridClosureException(e);
}
ctx.shared().database().checkpointReadLock();
try {
entry = entryEx(key);
entry.unswap();
EntryGetResult verVal = entry.versionedValue(cacheVal, res.version(), null, expiry, readerArgs);
if (log.isDebugEnabled())
log.debug("Set value loaded from store into entry [" + "oldVer=" + res.version() + ", newVer=" + verVal.version() + ", " + "entry=" + entry + ']');
// Don't put key-value pair into result map if value is null.
if (verVal.value() != null) {
ctx.addResult(map, key, verVal, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
}
if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED))
ctx.evicts().touch(entry, topVer);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry during getAllAsync (will retry): " + entry);
} catch (IgniteCheckedException e) {
// Wrap errors (will be unwrapped).
throw new GridClosureException(e);
} finally {
ctx.shared().database().checkpointReadUnlock();
}
}
}
});
clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
return map;
}
}), true), new C2<Map<K, V>, Exception, IgniteInternalFuture<Map<K, V>>>() {
@Override
public IgniteInternalFuture<Map<K, V>> apply(Map<K, V> map, Exception e) {
if (e != null) {
clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
return new GridFinishedFuture<>(e);
}
if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED)) {
Collection<KeyCacheObject> notFound = new HashSet<>(loadKeys.keySet());
notFound.removeAll(loaded);
// Touch entries that were not found in store.
for (KeyCacheObject key : notFound) {
GridCacheEntryEx entry = peekEx(key);
if (entry != null)
ctx.evicts().touch(entry, topVer);
}
}
// There were no misses.
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
}
}, new C2<Map<K1, V1>, Exception, Map<K1, V1>>() {
@Override
public Map<K1, V1> apply(Map<K1, V1> loaded, Exception e) {
if (e == null)
map.putAll(loaded);
return map;
}
});
} else {
// Misses can be non-zero only if store is enabled.
assert misses == null;
}
return new GridFinishedFuture<>(map);
} catch (RuntimeException | AssertionError e) {
if (misses != null) {
for (KeyCacheObject key0 : misses.keySet()) ctx.evicts().touch(peekEx(key0), topVer);
}
if (newLocalEntries != null) {
for (GridCacheEntryEx entry : newLocalEntries) removeEntry(entry);
}
return new GridFinishedFuture<>(e);
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
} else {
return asyncOp(tx, new AsyncOp<Map<K1, V1>>(keys) {
@Override
public IgniteInternalFuture<Map<K1, V1>> op(GridNearTxLocal tx, AffinityTopologyVersion readyTopVer) {
return tx.getAllAsync(ctx, readyTopVer, keys, deserializeBinary, skipVals, false, !readThrough, recovery, needVer);
}
}, ctx.operationContextPerCall(), /*retry*/false);
}
}
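
In the readNoEntry branch above the value is read straight from an offheap data row, its expire time is honored, and the code falls back to the full entry path only when the row is missing (with read-through enabled) or already expired. The following self-contained sketch restates that fast-path decision; the Row class and tryFastRead helper are hypothetical stand-ins, not Ignite API.

public class ReadNoEntrySketch {
    static final class Row {
        final Object val;
        final long expireTime; // 0 means the row never expires
        Row(Object val, long expireTime) { this.val = val; this.expireTime = expireTime; }
    }

    /** @return the cached value, or null if the caller must go through the full entry path. */
    static Object tryFastRead(Row row) {
        if (row == null)
            return null; // miss: fall back, e.g. so read-through can load the value
        if (row.expireTime != 0 && row.expireTime <= System.currentTimeMillis())
            return null; // expired: fall back so the entry path can handle TTL
        return row.val;  // hit: served without touching a heap cache entry
    }

    public static void main(String[] args) {
        System.out.println(tryFastRead(new Row("v1", 0)));                                 // v1
        System.out.println(tryFastRead(new Row("v2", System.currentTimeMillis() - 1000))); // null (expired)
        System.out.println(tryFastRead(null));                                             // null (miss)
    }
}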
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The following example is from the class GridCacheAdapter, method localPeek.
/**
* {@inheritDoc}
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
@Override
public final V localPeek(K key, CachePeekMode[] peekModes, @Nullable IgniteCacheExpiryPolicy plc) throws IgniteCheckedException {
A.notNull(key, "key");
if (keyCheck)
validateCacheKey(key);
ctx.checkSecurity(SecurityPermission.CACHE_READ);
PeekModes modes = parsePeekModes(peekModes, false);
KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
CacheObject cacheVal = null;
if (!ctx.isLocal()) {
AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
int part = ctx.affinity().partition(cacheKey);
boolean nearKey;
if (!(modes.near && modes.primary && modes.backup)) {
boolean keyPrimary = ctx.affinity().primaryByPartition(ctx.localNode(), part, topVer);
if (keyPrimary) {
if (!modes.primary)
return null;
nearKey = false;
} else {
boolean keyBackup = ctx.affinity().partitionBelongs(ctx.localNode(), part, topVer);
if (keyBackup) {
if (!modes.backup)
return null;
nearKey = false;
} else {
if (!modes.near)
return null;
nearKey = true;
// Swap and offheap are disabled for near cache.
modes.offheap = false;
}
}
} else {
nearKey = !ctx.affinity().partitionBelongs(ctx.localNode(), part, topVer);
if (nearKey) {
// Swap and offheap are disabled for near cache.
modes.offheap = false;
}
}
if (nearKey && !ctx.isNear())
return null;
GridCacheEntryEx e;
GridCacheContext ctx0;
while (true) {
if (nearKey) {
ctx0 = context();
e = peekEx(key);
} else {
ctx0 = ctx.isNear() ? ctx.near().dht().context() : ctx;
e = modes.offheap ? ctx0.cache().entryEx(key) : ctx0.cache().peekEx(key);
}
if (e != null) {
ctx.shared().database().checkpointReadLock();
try {
cacheVal = e.peek(modes.heap, modes.offheap, topVer, plc);
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry during 'peek': " + key);
continue;
} finally {
ctx0.evicts().touch(e, null);
ctx.shared().database().checkpointReadUnlock();
}
}
break;
}
} else {
while (true) {
try {
cacheVal = localCachePeek0(cacheKey, modes.heap, modes.offheap, plc);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry during 'peek': " + key);
// continue
}
}
}
Object val = ctx.unwrapBinaryIfNeeded(cacheVal, ctx.keepBinary(), false);
return (V) val;
}
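
localPeek first decides, for the key's partition at the current AffinityTopologyVersion, whether the local node holds the primary copy, a backup copy, or only a near copy, and returns null as soon as the requested peek modes exclude that role. The sketch below restates that branching in isolation; the enum, method and flag names are illustrative only, not Ignite API.

public class PeekModeDecisionSketch {
    enum Role { PRIMARY, BACKUP, NEAR }

    /** @return the role to peek under, or null if the requested modes exclude this node. */
    static Role resolve(boolean primaryByPartition, boolean partitionBelongs,
                        boolean wantPrimary, boolean wantBackup, boolean wantNear) {
        if (primaryByPartition)
            return wantPrimary ? Role.PRIMARY : null;   // local node owns the primary copy
        if (partitionBelongs)
            return wantBackup ? Role.BACKUP : null;     // local node owns a backup copy
        return wantNear ? Role.NEAR : null;             // near key: offheap peek is skipped
    }

    public static void main(String[] args) {
        System.out.println(resolve(true, true, false, true, true));   // null: primary copy, but only BACKUP mode requested
        System.out.println(resolve(false, true, false, true, true));  // BACKUP
        System.out.println(resolve(false, false, true, true, true));  // NEAR
    }
}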
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The following example is from the class CacheAffinitySharedManager, method onChangeAffinityMessage.
/**
* Called on exchange initiated by {@link CacheAffinityChangeMessage}, which is sent after rebalance is finished.
*
* @param exchFut Exchange future.
* @param crd Coordinator flag.
* @param msg Message.
* @throws IgniteCheckedException If failed.
*/
public void onChangeAffinityMessage(final GridDhtPartitionsExchangeFuture exchFut, boolean crd, final CacheAffinityChangeMessage msg) throws IgniteCheckedException {
assert msg.topologyVersion() != null && msg.exchangeId() == null : msg;
final AffinityTopologyVersion topVer = exchFut.initialVersion();
if (log.isDebugEnabled()) {
log.debug("Process affinity change message [exchVer=" + topVer + ", msgVer=" + msg.topologyVersion() + ']');
}
final Map<Integer, Map<Integer, List<UUID>>> affChange = msg.assignmentChange();
assert !F.isEmpty(affChange) : msg;
final Map<Integer, IgniteUuid> deploymentIds = msg.cacheDeploymentIds();
final Map<Object, List<List<ClusterNode>>> affCache = new HashMap<>();
forAllCacheGroups(crd, new IgniteInClosureX<GridAffinityAssignmentCache>() {
@Override
public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException {
AffinityTopologyVersion affTopVer = aff.lastVersion();
assert affTopVer.topologyVersion() > 0 : affTopVer;
CacheGroupDescriptor desc = caches.group(aff.groupId());
assert desc != null : aff.cacheOrGroupName();
IgniteUuid deploymentId = desc.deploymentId();
if (!deploymentId.equals(deploymentIds.get(aff.groupId()))) {
aff.clientEventTopologyChange(exchFut.firstEvent(), topVer);
return;
}
Map<Integer, List<UUID>> change = affChange.get(aff.groupId());
if (change != null) {
assert !change.isEmpty() : msg;
List<List<ClusterNode>> curAff = aff.assignments(affTopVer);
List<List<ClusterNode>> assignment = new ArrayList<>(curAff);
for (Map.Entry<Integer, List<UUID>> e : change.entrySet()) {
Integer part = e.getKey();
List<ClusterNode> nodes = toNodes(topVer, e.getValue());
assert !nodes.equals(assignment.get(part)) : "Assignment did not change " + "[cacheGrp=" + aff.cacheOrGroupName() + ", part=" + part + ", cur=" + F.nodeIds(assignment.get(part)) + ", new=" + F.nodeIds(nodes) + ", exchVer=" + exchFut.initialVersion() + ", msgVer=" + msg.topologyVersion() + ']';
assignment.set(part, nodes);
}
aff.initialize(topVer, cachedAssignment(aff, assignment, affCache));
} else
aff.clientEventTopologyChange(exchFut.firstEvent(), topVer);
}
});
}
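
Applying the affinity change message above comes down to copying the current assignment, overwriting only the partitions listed in the message, and initializing the group at the new topology version. Here is a self-contained sketch of that pattern, using plain UUIDs in place of ClusterNode; the class and helper names are invented for the example.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class ApplyAffinityChangeSketch {
    /** Copies the current assignment and replaces only the partitions present in the change map. */
    static List<List<UUID>> apply(List<List<UUID>> curAssignment, Map<Integer, List<UUID>> change) {
        List<List<UUID>> res = new ArrayList<>(curAssignment);
        for (Map.Entry<Integer, List<UUID>> e : change.entrySet())
            res.set(e.getKey(), e.getValue());
        return res;
    }

    public static void main(String[] args) {
        UUID a = UUID.randomUUID(), b = UUID.randomUUID();
        // Three partitions: 0 and 1 on node a, partition 2 on node b.
        List<List<UUID>> cur = Arrays.asList(Arrays.asList(a), Arrays.asList(a), Arrays.asList(b));
        Map<Integer, List<UUID>> change = new HashMap<>();
        change.put(1, Arrays.asList(b)); // the message moves partition 1 to node b

        System.out.println(apply(cur, change));
    }
}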
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The following example is from the class CacheAffinitySharedManager, method onExchangeChangeAffinityMessage.
/**
* Called when a {@link CacheAffinityChangeMessage} that should complete the exchange is received.
*
* @param exchFut Exchange future.
* @param crd Coordinator flag.
* @param msg Affinity change message.
*/
public void onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture exchFut, boolean crd, CacheAffinityChangeMessage msg) {
if (log.isDebugEnabled()) {
log.debug("Process exchange affinity change message [exchVer=" + exchFut.initialVersion() + ", msg=" + msg + ']');
}
assert exchFut.exchangeId().equals(msg.exchangeId()) : msg;
final AffinityTopologyVersion topVer = exchFut.initialVersion();
final Map<Integer, Map<Integer, List<UUID>>> assignment = msg.assignmentChange();
assert assignment != null;
final Map<Object, List<List<ClusterNode>>> affCache = new HashMap<>();
forAllCacheGroups(crd, new IgniteInClosureX<GridAffinityAssignmentCache>() {
@Override
public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException {
List<List<ClusterNode>> idealAssignment = aff.idealAssignment();
assert idealAssignment != null;
Map<Integer, List<UUID>> cacheAssignment = assignment.get(aff.groupId());
List<List<ClusterNode>> newAssignment;
if (cacheAssignment != null) {
newAssignment = new ArrayList<>(idealAssignment);
for (Map.Entry<Integer, List<UUID>> e : cacheAssignment.entrySet()) newAssignment.set(e.getKey(), toNodes(topVer, e.getValue()));
} else
newAssignment = idealAssignment;
aff.initialize(topVer, cachedAssignment(aff, newAssignment, affCache));
}
});
}