Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridCacheMultiUpdateLockSelfTest, method checkMultiUpdateLocks.
/**
* @param nearEnabled Near enabled flag.
* @throws Exception If failed.
*/
private void checkMultiUpdateLocks(boolean nearEnabled) throws Exception {
this.nearEnabled = nearEnabled;
startGrids(3);
try {
IgniteKernal g = (IgniteKernal) grid(0);
GridCacheContext<Object, Object> cctx = g.internalCache(DEFAULT_CACHE_NAME).context();
GridDhtCacheAdapter cache = nearEnabled ? cctx.near().dht() : cctx.colocated();
AffinityTopologyVersion topVer = cache.beginMultiUpdate();
IgniteInternalFuture<?> startFut;
try {
assertEquals(3, topVer.topologyVersion());
final AtomicBoolean started = new AtomicBoolean();
startFut = multithreadedAsync(new Callable<Object>() {
@Override
public Object call() throws Exception {
info(">>>> Starting grid.");
Ignite g4 = startGrid(4);
started.set(true);
IgniteCache<Object, Object> c = g4.cache(DEFAULT_CACHE_NAME);
info(">>>> Checking tx in new grid.");
try (Transaction tx = g4.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
assertEquals(2, c.get("a"));
assertEquals(4, c.get("b"));
assertEquals(6, c.get("c"));
}
return null;
}
}, 1);
U.sleep(200);
info(">>>> Checking grid has not started yet.");
assertFalse(started.get());
// Check we can proceed with transactions.
IgniteCache<Object, Object> cache0 = g.cache(DEFAULT_CACHE_NAME);
info(">>>> Checking tx commit.");
Transaction tx = g.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);
try {
cache0.put("a", 1);
cache0.put("b", 2);
cache0.put("c", 3);
tx.commit();
} finally {
tx.close();
}
info(">>>> Checking grid still is not started");
assertFalse(started.get());
tx = g.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);
try {
cache0.put("a", 2);
cache0.put("b", 4);
cache0.put("c", 6);
tx.commit();
} finally {
tx.close();
}
} finally {
info(">>>> Releasing multi update.");
cache.endMultiUpdate();
}
info("Waiting for thread termination.");
startFut.get();
} finally {
stopAllGrids();
}
}
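The essential pattern here is the acquire/release discipline around beginMultiUpdate(): the returned AffinityTopologyVersion reflects the topology at which the multi-update is pinned, and a node join observed by this cache does not complete until endMultiUpdate() is called (which is what the started flag asserts). A minimal sketch of that discipline, written as a helper inside the same test class and assuming a GridDhtCacheAdapter obtained from the internal cache context as above:

private void withMultiUpdateLock(GridDhtCacheAdapter<Object, Object> cache) throws IgniteCheckedException {
    // Pin the current affinity topology version; a joining node will not finish
    // its startup for this cache until the multi-update is released.
    AffinityTopologyVersion topVer = cache.beginMultiUpdate();

    try {
        // The major version counts topology changes; with three nodes started
        // from scratch the test above expects it to be 3.
        info("Pinned topology version: " + topVer.topologyVersion());
    }
    finally {
        // Always release, otherwise the joining node would hang on startFut.
        cache.endMultiUpdate();
    }
}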
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridNearTxLocal, method enlistRead.
/**
* @param cacheCtx Cache context.
* @param entryTopVer Explicit topology version for enlisted entries ({@code null} to use the transaction topology version).
* @param keys Keys to enlist.
* @param expiryPlc Explicitly specified expiry policy for entry.
* @param map Return map.
* @param missed Map of missed keys.
* @param keysCnt Keys count (to avoid call to {@code Collection.size()}).
* @param deserializeBinary Deserialize binary flag.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects flag.
* @param skipStore Skip store flag.
* @param recovery Recovery flag.
* @param needVer {@code True} if entry versions are needed.
* @throws IgniteCheckedException If failed.
* @return Enlisted keys.
*/
@SuppressWarnings({ "RedundantTypeArguments" })
private <K, V> Collection<KeyCacheObject> enlistRead(final GridCacheContext cacheCtx, @Nullable AffinityTopologyVersion entryTopVer, Collection<KeyCacheObject> keys, @Nullable ExpiryPolicy expiryPlc, Map<K, V> map, Map<KeyCacheObject, GridCacheVersion> missed, int keysCnt, boolean deserializeBinary, boolean skipVals, boolean keepCacheObjects, boolean skipStore, boolean recovery, final boolean needVer) throws IgniteCheckedException {
assert !F.isEmpty(keys);
assert keysCnt == keys.size();
cacheCtx.checkSecurity(SecurityPermission.CACHE_READ);
boolean single = keysCnt == 1;
Collection<KeyCacheObject> lockKeys = null;
AffinityTopologyVersion topVer = entryTopVer != null ? entryTopVer : topologyVersion();
boolean needReadVer = (serializable() && optimistic()) || needVer;
// Reads that require locking (pessimistic, not read-committed) are completed outside of this loop.
for (KeyCacheObject key : keys) {
if ((pessimistic() || needReadVer) && !readCommitted() && !skipVals)
addActiveCache(cacheCtx, recovery);
IgniteTxKey txKey = cacheCtx.txKey(key);
// Check write map (always check writes first).
IgniteTxEntry txEntry = entry(txKey);
// Either non-read-committed or there was a previous write.
if (txEntry != null) {
CacheObject val = txEntry.value();
if (txEntry.hasValue()) {
if (!F.isEmpty(txEntry.entryProcessors()))
val = txEntry.applyEntryProcessors(val);
if (val != null) {
GridCacheVersion ver = null;
if (needVer) {
if (txEntry.op() != READ)
ver = IgniteTxEntry.GET_ENTRY_INVALID_VER_UPDATED;
else {
ver = txEntry.entryReadVersion();
if (ver == null && pessimistic()) {
while (true) {
try {
GridCacheEntryEx cached = txEntry.cached();
ver = cached.isNear() ? ((GridNearCacheEntry) cached).dhtVersion() : cached.version();
break;
} catch (GridCacheEntryRemovedException ignored) {
txEntry.cached(entryEx(cacheCtx, txEntry.txKey(), topVer));
}
}
}
if (ver == null) {
assert optimistic() && repeatableRead() : this;
ver = IgniteTxEntry.GET_ENTRY_INVALID_VER_AFTER_GET;
}
}
assert ver != null;
}
cacheCtx.addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, false, ver, 0, 0);
}
} else {
assert txEntry.op() == TRANSFORM;
while (true) {
try {
GridCacheVersion readVer = null;
EntryGetResult getRes = null;
Object transformClo = (txEntry.op() == TRANSFORM && cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ? F.first(txEntry.entryProcessors()) : null;
if (needVer) {
getRes = txEntry.cached().innerGetVersioned(null, this, /*update-metrics*/ true, /*event*/ !skipVals,
CU.subjectId(this, cctx), transformClo, resolveTaskName(), null, txEntry.keepBinary(), null);
if (getRes != null) {
val = getRes.value();
readVer = getRes.version();
}
} else {
val = txEntry.cached().innerGet(null, this, /*read-through*/ false, /*metrics*/ true, /*event*/ !skipVals,
CU.subjectId(this, cctx), transformClo, resolveTaskName(), null, txEntry.keepBinary());
}
if (val != null) {
if (!readCommitted() && !skipVals)
txEntry.readValue(val);
if (!F.isEmpty(txEntry.entryProcessors()))
val = txEntry.applyEntryProcessors(val);
cacheCtx.addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, false, getRes, readVer, 0, 0, needVer);
} else
missed.put(key, txEntry.cached().version());
break;
} catch (GridCacheEntryRemovedException ignored) {
txEntry.cached(entryEx(cacheCtx, txEntry.txKey(), topVer));
}
}
}
} else {
// First time access within transaction.
if (lockKeys == null && !skipVals)
lockKeys = single ? Collections.singleton(key) : new ArrayList<KeyCacheObject>(keysCnt);
if (!single && !skipVals)
lockKeys.add(key);
while (true) {
GridCacheEntryEx entry = entryEx(cacheCtx, txKey, topVer);
try {
GridCacheVersion ver = entry.version();
CacheObject val = null;
GridCacheVersion readVer = null;
EntryGetResult getRes = null;
if (!pessimistic() || readCommitted() && !skipVals) {
IgniteCacheExpiryPolicy accessPlc = optimistic() ? accessPolicy(cacheCtx, txKey, expiryPlc) : null;
if (needReadVer) {
getRes = primaryLocal(entry) ? entry.innerGetVersioned(null, this, /*metrics*/ true, /*event*/ true,
CU.subjectId(this, cctx), null, resolveTaskName(), accessPlc, !deserializeBinary, null) : null;
if (getRes != null) {
val = getRes.value();
readVer = getRes.version();
}
} else {
val = entry.innerGet(null, this, /*read-through*/ false, /*metrics*/ true, /*event*/ !skipVals,
CU.subjectId(this, cctx), null, resolveTaskName(), accessPlc, !deserializeBinary);
}
if (val != null) {
cacheCtx.addResult(map, key, val, skipVals, keepCacheObjects, deserializeBinary, false, getRes, readVer, 0, 0, needVer);
} else
missed.put(key, ver);
} else
// We must wait for the lock in pessimistic mode.
missed.put(key, ver);
if (!readCommitted() && !skipVals) {
txEntry = addEntry(READ, val, null, null, entry, expiryPlc, null, true, -1L, -1L, null, skipStore, !deserializeBinary, CU.isNearEnabled(cacheCtx));
// As an optimization, mark the entry valid immediately for non-pessimistic transactions if the value is not null.
if (val != null && !pessimistic()) {
txEntry.markValid();
if (needReadVer) {
assert readVer != null;
txEntry.entryReadVersion(readVer);
}
}
}
// While.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry in transaction getAllAsync(..) (will retry): " + key);
} finally {
if (entry != null && readCommitted()) {
if (cacheCtx.isNear()) {
if (cacheCtx.affinity().partitionBelongs(cacheCtx.localNode(), entry.partition(), topVer)) {
if (entry.markObsolete(xidVer))
cacheCtx.cache().removeEntry(entry);
}
} else
entry.context().evicts().touch(entry, topVer);
}
}
}
}
}
return lockKeys != null ? lockKeys : Collections.<KeyCacheObject>emptyList();
}
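Note the fallback near the top of the method: topVer = entryTopVer != null ? entryTopVer : topologyVersion(). This works because AffinityTopologyVersion is a plain immutable (major, minor) version pair, so an explicitly supplied entry version can stand in for the transaction's own snapshot. A minimal sketch of those value semantics, assuming the standard two-argument constructor (the values 3 and 1 are illustrative):

AffinityTopologyVersion explicitVer = new AffinityTopologyVersion(3, 1);
AffinityTopologyVersion entryTopVer = null;

// Same fallback as in enlistRead(): prefer the explicitly supplied entry
// version, otherwise use the transaction's own topology version.
AffinityTopologyVersion topVer = entryTopVer != null ? entryTopVer : explicitVer;

// The major version grows on node joins and leaves; the minor version grows on
// in-place changes (e.g. dynamic cache starts) that keep the membership intact.
assert topVer.topologyVersion() == 3;
assert topVer.minorTopologyVersion() == 1;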
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridNearTxLocal, method getAllAsync.
/**
* @param cacheCtx Cache context.
* @param entryTopVer Explicit topology version for entries ({@code null} to use the transaction topology version).
* @param keys Keys to get.
* @param deserializeBinary Deserialize binary flag.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects flag.
* @param skipStore Skip store flag.
* @param recovery Recovery flag.
* @param needVer {@code True} if entry versions are needed.
* @return Future for this get.
*/
@SuppressWarnings("unchecked")
public <K, V> IgniteInternalFuture<Map<K, V>> getAllAsync(final GridCacheContext cacheCtx, @Nullable final AffinityTopologyVersion entryTopVer, Collection<KeyCacheObject> keys, final boolean deserializeBinary, final boolean skipVals, final boolean keepCacheObjects, final boolean skipStore, final boolean recovery, final boolean needVer) {
if (F.isEmpty(keys))
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
init();
int keysCnt = keys.size();
boolean single = keysCnt == 1;
try {
checkValid();
final Map<K, V> retMap = new GridLeanMap<>(keysCnt);
final Map<KeyCacheObject, GridCacheVersion> missed = new GridLeanMap<>(pessimistic() ? keysCnt : 0);
CacheOperationContext opCtx = cacheCtx.operationContextPerCall();
ExpiryPolicy expiryPlc = opCtx != null ? opCtx.expiry() : null;
final Collection<KeyCacheObject> lockKeys = enlistRead(cacheCtx, entryTopVer, keys, expiryPlc, retMap, missed, keysCnt, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer);
if (single && missed.isEmpty())
return new GridFinishedFuture<>(retMap);
// Handle locks.
if (pessimistic() && !readCommitted() && !skipVals) {
if (expiryPlc == null)
expiryPlc = cacheCtx.expiry();
long accessTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForAccess()) : CU.TTL_NOT_CHANGED;
long createTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForCreation()) : CU.TTL_NOT_CHANGED;
long timeout = remainingTime();
if (timeout == -1)
return new GridFinishedFuture<>(timeoutException());
IgniteInternalFuture<Boolean> fut = cacheCtx.cache().txLockAsync(lockKeys, timeout, this, true, true, isolation, isInvalidate(), createTtl, accessTtl);
final ExpiryPolicy expiryPlc0 = expiryPlc;
PLC2<Map<K, V>> plc2 = new PLC2<Map<K, V>>() {
@Override
public IgniteInternalFuture<Map<K, V>> postLock() throws IgniteCheckedException {
if (log.isDebugEnabled())
log.debug("Acquired transaction lock for read on keys: " + lockKeys);
// Load keys only after the locks have been acquired.
for (KeyCacheObject cacheKey : lockKeys) {
K keyVal = (K) (keepCacheObjects ? cacheKey : cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, true));
if (retMap.containsKey(keyVal))
// We already have a return value.
continue;
IgniteTxKey txKey = cacheCtx.txKey(cacheKey);
IgniteTxEntry txEntry = entry(txKey);
assert txEntry != null;
// Check if there is cached value.
while (true) {
GridCacheEntryEx cached = txEntry.cached();
CacheObject val = null;
GridCacheVersion readVer = null;
EntryGetResult getRes = null;
try {
Object transformClo = (!F.isEmpty(txEntry.entryProcessors()) && cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ? F.first(txEntry.entryProcessors()) : null;
if (needVer) {
getRes = cached.innerGetVersioned(null, GridNearTxLocal.this, /*update-metrics*/ true, /*event*/ !skipVals,
CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(), null, txEntry.keepBinary(), null);
if (getRes != null) {
val = getRes.value();
readVer = getRes.version();
}
} else {
val = cached.innerGet(null, GridNearTxLocal.this, /*read through*/ false, /*metrics*/ true, /*events*/ !skipVals,
CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(), null, txEntry.keepBinary());
}
// If value is in cache and passed the filter.
if (val != null) {
missed.remove(cacheKey);
txEntry.setAndMarkValid(val);
if (!F.isEmpty(txEntry.entryProcessors()))
val = txEntry.applyEntryProcessors(val);
cacheCtx.addResult(retMap, cacheKey, val, skipVals, keepCacheObjects, deserializeBinary, false, getRes, readVer, 0, 0, needVer);
if (readVer != null)
txEntry.entryReadVersion(readVer);
}
// While.
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed exception in get postLock (will retry): " + cached);
txEntry.cached(entryEx(cacheCtx, txKey, topologyVersion()));
}
}
}
if (!missed.isEmpty() && cacheCtx.isLocal()) {
AffinityTopologyVersion topVer = topologyVersionSnapshot();
if (topVer == null)
topVer = entryTopVer;
return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc0);
}
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
}
};
FinishClosure<Map<K, V>> finClos = new FinishClosure<Map<K, V>>() {
@Override
Map<K, V> finish(Map<K, V> loaded) {
retMap.putAll(loaded);
return retMap;
}
};
if (fut.isDone()) {
try {
IgniteInternalFuture<Map<K, V>> fut1 = plc2.apply(fut.get(), null);
return fut1.isDone() ? new GridFinishedFuture<>(finClos.apply(fut1.get(), null)) : new GridEmbeddedFuture<>(finClos, fut1);
} catch (GridClosureException e) {
return new GridFinishedFuture<>(e.unwrap());
} catch (IgniteCheckedException e) {
try {
return plc2.apply(false, e);
} catch (Exception e1) {
return new GridFinishedFuture<>(e1);
}
}
} else {
return new GridEmbeddedFuture<>(fut, plc2, finClos);
}
} else {
assert optimistic() || readCommitted() || skipVals;
if (!missed.isEmpty()) {
if (!readCommitted())
for (Iterator<KeyCacheObject> it = missed.keySet().iterator(); it.hasNext(); ) {
KeyCacheObject cacheKey = it.next();
K keyVal = (K) (keepCacheObjects ? cacheKey : cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false));
if (retMap.containsKey(keyVal))
it.remove();
}
if (missed.isEmpty())
return new GridFinishedFuture<>(retMap);
AffinityTopologyVersion topVer = topologyVersionSnapshot();
if (topVer == null)
topVer = entryTopVer;
return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc);
}
return new GridFinishedFuture<>(retMap);
}
} catch (IgniteCheckedException e) {
setRollbackOnly();
return new GridFinishedFuture<>(e);
}
}
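For context, this read path backs cache reads performed inside an explicit transaction: with PESSIMISTIC concurrency the keys are locked (txLockAsync above) before the values are loaded on the captured affinity topology version. A hedged public-API sketch of a call that can end up in getAllAsync for a transactional cache; the Ignite instance, cache name and keys are illustrative:

IgniteCache<String, Integer> cache = ignite.cache("myCache");

try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
    // For a pessimistic transaction the keys are locked first and only then
    // loaded, which corresponds to the postLock() branch above.
    Map<String, Integer> vals = cache.getAll(new HashSet<>(Arrays.asList("a", "b", "c")));

    tx.commit();
}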
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class IgniteCachePartitionLossPolicySelfTest, method checkLostPartition.
/**
* @param canWrite {@code True} if writes are allowed.
* @param safe {@code True} if a lost partition should trigger an exception.
* @throws Exception if failed.
*/
private void checkLostPartition(boolean canWrite, boolean safe) throws Exception {
assert partLossPlc != null;
int part = prepareTopology();
// Wait for all grids (servers and client) to have the same topology version
// to make sure that all nodes have received the partition map with the lost partition.
GridTestUtils.waitForCondition(() -> {
AffinityTopologyVersion last = null;
for (Ignite ig : G.allGrids()) {
AffinityTopologyVersion ver = ((IgniteEx) ig).context().cache().context().exchange().readyAffinityVersion();
if (last != null && !last.equals(ver))
return false;
last = ver;
}
return true;
}, 10000);
for (Ignite ig : G.allGrids()) {
info("Checking node: " + ig.cluster().localNode().id());
IgniteCache<Integer, Integer> cache = ig.cache(CACHE_NAME);
verifyCacheOps(canWrite, safe, part, ig);
// Check we can read and write to lost partition in recovery mode.
IgniteCache<Integer, Integer> recoverCache = cache.withPartitionRecover();
for (int lostPart : recoverCache.lostPartitions()) {
recoverCache.get(lostPart);
recoverCache.put(lostPart, lostPart);
}
// Check that writing in recover mode does not clear partition state.
verifyCacheOps(canWrite, safe, part, ig);
}
// Check that partition state does not change after we start a new node.
IgniteEx grd = startGrid(3);
info("Newly started node: " + grd.cluster().localNode().id());
for (Ignite ig : G.allGrids()) verifyCacheOps(canWrite, safe, part, ig);
ignite(0).resetLostPartitions(Collections.singletonList(CACHE_NAME));
awaitPartitionMapExchange(true, true, null);
for (Ignite ig : G.allGrids()) {
IgniteCache<Integer, Integer> cache = ig.cache(CACHE_NAME);
assertTrue(cache.lostPartitions().isEmpty());
int parts = ig.affinity(CACHE_NAME).partitions();
for (int i = 0; i < parts; i++) {
cache.get(i);
cache.put(i, i);
}
}
}
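All of the recovery steps exercised by this test go through the public API. A minimal sketch of the same sequence, assuming an Ignite instance and the same CACHE_NAME used above:

IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);

// Lost partitions can be read (and, depending on the configured loss policy,
// written) only through the recovery proxy.
IgniteCache<Integer, Integer> recoverCache = cache.withPartitionRecover();

for (int lostPart : recoverCache.lostPartitions())
    recoverCache.get(lostPart);

// Once the data is restored or accepted as lost, reset the lost state so that
// regular operations are allowed again; the test then awaits partition map exchange.
ignite.resetLostPartitions(Collections.singletonList(CACHE_NAME));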
Use of org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion in project ignite by apache.
The class GridClusterStateProcessor, method sendComputeChangeGlobalState.
/**
* @param activate New cluster state: {@code true} to activate, {@code false} to deactivate.
* @param blt Baseline topology to set.
* @param forceBlt Force baseline topology change flag.
* @param resFut State change future.
*/
private void sendComputeChangeGlobalState(boolean activate, BaselineTopology blt, boolean forceBlt, final GridFutureAdapter<Void> resFut) {
AffinityTopologyVersion topVer = ctx.discovery().topologyVersionEx();
if (log.isInfoEnabled()) {
log.info("Sending " + prettyStr(activate) + " request from node [id=" + ctx.localNodeId() + ", topVer=" + topVer + ", client=" + ctx.clientNode() + ", daemon=" + ctx.isDaemon() + "]");
}
IgniteCompute comp = ((ClusterGroupAdapter) ctx.cluster().get().forServers()).compute();
IgniteFuture<Void> fut = comp.runAsync(new ClientChangeGlobalStateComputeRequest(activate, blt, forceBlt));
fut.listen(new CI1<IgniteFuture>() {
@Override
public void apply(IgniteFuture fut) {
try {
fut.get();
resFut.onDone();
} catch (Exception e) {
resFut.onDone(e);
}
}
});
}
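This compute-based request is the path taken when a state change is initiated through the public API, for example from a client node. A minimal sketch of the user-facing call, assuming the classic IgniteCluster activation method ("ignite" is an illustrative instance):

// Activate the cluster if it is not active yet; on a client node this
// eventually results in a ClientChangeGlobalStateComputeRequest as above.
if (!ignite.cluster().active())
    ignite.cluster().active(true);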