Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
The class GridNearTxLocal, method getAllAsync.
/**
 * @param cacheCtx Cache context.
 * @param entryTopVer Entry topology version, if any.
 * @param keys Keys to get.
 * @param deserializeBinary Deserialize binary flag.
 * @param skipVals Skip values flag.
 * @param keepCacheObjects Keep cache objects flag.
 * @param skipStore Skip store flag.
 * @param recovery Recovery flag.
 * @param needVer If {@code true}, entry versions are returned along with values.
 * @return Future for this get.
 */
@SuppressWarnings("unchecked")
public <K, V> IgniteInternalFuture<Map<K, V>> getAllAsync(
    final GridCacheContext cacheCtx,
    @Nullable final AffinityTopologyVersion entryTopVer,
    Collection<KeyCacheObject> keys,
    final boolean deserializeBinary,
    final boolean skipVals,
    final boolean keepCacheObjects,
    final boolean skipStore,
    final boolean recovery,
    final boolean needVer) {
    if (F.isEmpty(keys))
        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
    init();
    int keysCnt = keys.size();
    boolean single = keysCnt == 1;
    try {
        checkValid();
        final Map<K, V> retMap = new GridLeanMap<>(keysCnt);
        final Map<KeyCacheObject, GridCacheVersion> missed = new GridLeanMap<>(pessimistic() ? keysCnt : 0);
        CacheOperationContext opCtx = cacheCtx.operationContextPerCall();
        ExpiryPolicy expiryPlc = opCtx != null ? opCtx.expiry() : null;
        final Collection<KeyCacheObject> lockKeys = enlistRead(cacheCtx, entryTopVer, keys, expiryPlc, retMap,
            missed, keysCnt, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer);
        if (single && missed.isEmpty())
            return new GridFinishedFuture<>(retMap);
        // Handle locks.
        if (pessimistic() && !readCommitted() && !skipVals) {
            if (expiryPlc == null)
                expiryPlc = cacheCtx.expiry();
            long accessTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForAccess()) : CU.TTL_NOT_CHANGED;
            long createTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForCreation()) : CU.TTL_NOT_CHANGED;
            long timeout = remainingTime();
            if (timeout == -1)
                return new GridFinishedFuture<>(timeoutException());
            IgniteInternalFuture<Boolean> fut = cacheCtx.cache().txLockAsync(lockKeys, timeout, this, true, true,
                isolation, isInvalidate(), createTtl, accessTtl);
            final ExpiryPolicy expiryPlc0 = expiryPlc;
            PLC2<Map<K, V>> plc2 = new PLC2<Map<K, V>>() {
                @Override public IgniteInternalFuture<Map<K, V>> postLock() throws IgniteCheckedException {
                    if (log.isDebugEnabled())
                        log.debug("Acquired transaction lock for read on keys: " + lockKeys);
                    // Load keys only after the locks have been acquired.
                    for (KeyCacheObject cacheKey : lockKeys) {
                        K keyVal = (K)(keepCacheObjects ? cacheKey :
                            cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, true));
                        if (retMap.containsKey(keyVal))
                            // We already have a return value.
                            continue;
                        IgniteTxKey txKey = cacheCtx.txKey(cacheKey);
                        IgniteTxEntry txEntry = entry(txKey);
                        assert txEntry != null;
                        // Check if there is cached value.
                        while (true) {
                            GridCacheEntryEx cached = txEntry.cached();
                            CacheObject val = null;
                            GridCacheVersion readVer = null;
                            EntryGetResult getRes = null;
                            try {
                                Object transformClo =
                                    (!F.isEmpty(txEntry.entryProcessors()) &&
                                        cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ?
                                        F.first(txEntry.entryProcessors()) : null;
                                if (needVer) {
                                    getRes = cached.innerGetVersioned(null, GridNearTxLocal.this,
                                        /*update-metrics*/true, /*event*/!skipVals,
                                        CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(),
                                        null, txEntry.keepBinary(), null);
                                    if (getRes != null) {
                                        val = getRes.value();
                                        readVer = getRes.version();
                                    }
                                }
                                else {
                                    val = cached.innerGet(null, GridNearTxLocal.this,
                                        /*read through*/false, /*metrics*/true, /*events*/!skipVals,
                                        CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(),
                                        null, txEntry.keepBinary());
                                }
                                // If value is in cache and passed the filter.
                                if (val != null) {
                                    missed.remove(cacheKey);
                                    txEntry.setAndMarkValid(val);
                                    if (!F.isEmpty(txEntry.entryProcessors()))
                                        val = txEntry.applyEntryProcessors(val);
                                    cacheCtx.addResult(retMap, cacheKey, val, skipVals, keepCacheObjects,
                                        deserializeBinary, false, getRes, readVer, 0, 0, needVer);
                                    if (readVer != null)
                                        txEntry.entryReadVersion(readVer);
                                }
                                // While.
                                break;
                            }
                            catch (GridCacheEntryRemovedException ignore) {
                                if (log.isDebugEnabled())
                                    log.debug("Got removed exception in get postLock (will retry): " + cached);
                                txEntry.cached(entryEx(cacheCtx, txKey, topologyVersion()));
                            }
                        }
                    }
                    if (!missed.isEmpty() && cacheCtx.isLocal()) {
                        AffinityTopologyVersion topVer = topologyVersionSnapshot();
                        if (topVer == null)
                            topVer = entryTopVer;
                        return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed,
                            deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc0);
                    }
                    return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
                }
            };
            FinishClosure<Map<K, V>> finClos = new FinishClosure<Map<K, V>>() {
                @Override Map<K, V> finish(Map<K, V> loaded) {
                    retMap.putAll(loaded);
                    return retMap;
                }
            };
            if (fut.isDone()) {
                try {
                    IgniteInternalFuture<Map<K, V>> fut1 = plc2.apply(fut.get(), null);
                    return fut1.isDone() ?
                        new GridFinishedFuture<>(finClos.apply(fut1.get(), null)) :
                        new GridEmbeddedFuture<>(finClos, fut1);
                }
                catch (GridClosureException e) {
                    return new GridFinishedFuture<>(e.unwrap());
                }
                catch (IgniteCheckedException e) {
                    try {
                        return plc2.apply(false, e);
                    }
                    catch (Exception e1) {
                        return new GridFinishedFuture<>(e1);
                    }
                }
            }
            else {
                return new GridEmbeddedFuture<>(fut, plc2, finClos);
            }
        }
        else {
            assert optimistic() || readCommitted() || skipVals;
            if (!missed.isEmpty()) {
                if (!readCommitted())
                    for (Iterator<KeyCacheObject> it = missed.keySet().iterator(); it.hasNext(); ) {
                        KeyCacheObject cacheKey = it.next();
                        K keyVal = (K)(keepCacheObjects ? cacheKey :
                            cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false));
                        if (retMap.containsKey(keyVal))
                            it.remove();
                    }
                if (missed.isEmpty())
                    return new GridFinishedFuture<>(retMap);
                AffinityTopologyVersion topVer = topologyVersionSnapshot();
                if (topVer == null)
                    topVer = entryTopVer;
                return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed,
                    deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc);
            }
            return new GridFinishedFuture<>(retMap);
        }
    }
    catch (IgniteCheckedException e) {
        setRollbackOnly();
        return new GridFinishedFuture<>(e);
    }
}
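The method above is the internal entry point that serves transactional reads on the near node. As a rough illustration of how this path is reached from application code, here is a minimal sketch that uses only the public Ignite API (cache name and keys are made up): reading several keys inside a pessimistic REPEATABLE_READ transaction corresponds to the locking branch above, which acquires read locks via txLockAsync() before loading values.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class TxGetAllExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Illustrative cache; transactions require TRANSACTIONAL atomicity mode.
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("txCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);
            cache.put(1, "one");
            cache.put(2, "two");
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                // Read locks are acquired for all requested keys before values are returned;
                // key 3 is simply absent from the result map.
                Map<Integer, String> vals = cache.getAll(new HashSet<>(Arrays.asList(1, 2, 3)));
                System.out.println(vals);
                tx.commit();
            }
        }
    }
}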
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method createLockReply.
/**
* @param nearNode Near node.
* @param entries Entries.
* @param req Lock request.
* @param tx Transaction.
* @param mappedVer Mapped version.
* @param err Error.
* @return Response.
*/
private GridNearLockResponse createLockReply(ClusterNode nearNode,
    List<GridCacheEntryEx> entries,
    GridNearLockRequest req,
    @Nullable GridDhtTxLocalAdapter tx,
    GridCacheVersion mappedVer,
    Throwable err) {
    assert mappedVer != null;
    assert tx == null || tx.xidVersion().equals(mappedVer);
    try {
        // Send reply back to originating near node.
        GridNearLockResponse res = new GridNearLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(),
            tx != null && tx.onePhaseCommit(), entries.size(), err, null, ctx.deploymentEnabled());
        if (err == null) {
            res.pending(localDhtPendingVersions(entries, mappedVer));
            // We have to add completed versions for cases when nearLocal and remote transactions
            // execute concurrently.
            IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(req.version());
            res.completedVersions(versPair.get1(), versPair.get2());
            int i = 0;
            for (ListIterator<GridCacheEntryEx> it = entries.listIterator(); it.hasNext(); ) {
                GridCacheEntryEx e = it.next();
                assert e != null;
                while (true) {
                    try {
                        // Don't return anything for invalid partitions.
                        if (tx == null || !tx.isRollbackOnly()) {
                            GridCacheVersion dhtVer = req.dhtVersion(i);
                            GridCacheVersion ver = e.version();
                            boolean ret = req.returnValue(i) || dhtVer == null || !dhtVer.equals(ver);
                            CacheObject val = null;
                            if (ret)
                                val = e.innerGet(null, tx,
                                    /*read-through*/false, /*update-metrics*/true,
                                    /*event notification*/req.returnValue(i),
                                    CU.subjectId(tx, ctx.shared()), null,
                                    tx != null ? tx.resolveTaskName() : null, null, req.keepBinary());
                            assert e.lockedBy(mappedVer) || (ctx.mvcc().isRemoved(e.context(), mappedVer) && req.timeout() > 0) :
                                "Entry does not own lock for tx [locNodeId=" + ctx.localNodeId() + ", entry=" + e +
                                    ", mappedVer=" + mappedVer + ", ver=" + ver + ", tx=" + tx + ", req=" + req +
                                    ", err=" + err + ']';
                            boolean filterPassed = false;
                            if (tx != null && tx.onePhaseCommit()) {
                                IgniteTxEntry writeEntry = tx.entry(ctx.txKey(e.key()));
                                assert writeEntry != null : "Missing tx entry for locked cache entry: " + e;
                                filterPassed = writeEntry.filtersPassed();
                            }
                            if (ret && val == null)
                                val = e.valueBytes(null);
                            // We include values into response since they are required for local
                            // calls and won't be serialized. We are also including DHT version.
                            res.addValueBytes(ret ? val : null, filterPassed, ver, mappedVer);
                        }
                        else {
                            // We include values into response since they are required for local
                            // calls and won't be serialized. We are also including DHT version.
                            res.addValueBytes(null, false, e.version(), mappedVer);
                        }
                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry when sending reply to DHT lock request (will retry): " + e);
                        e = entryExx(e.key());
                        it.set(e);
                    }
                }
                i++;
            }
        }
        return res;
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to get value for lock reply message for node [node=" + U.toShortString(nearNode) +
            ", req=" + req + ']', e);
        return new GridNearLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), false,
            entries.size(), e, null, ctx.deploymentEnabled());
    }
}
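createLockReply builds the GridNearLockResponse that the primary (DHT) node sends back after processing a near lock request. One way such requests arise from application code is an explicit entry lock on a TRANSACTIONAL cache; the sketch below uses only the public API (cache name and key are illustrative).

import java.util.concurrent.locks.Lock;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class ExplicitLockExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Explicit locks are only supported on TRANSACTIONAL caches.
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("lockCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);
            Lock lock = cache.lock(42);
            lock.lock();
            try {
                // The key stays locked cluster-wide while we read and update it.
                String cur = cache.get(42);
                cache.put(42, cur == null ? "initial" : cur + "-updated");
            }
            finally {
                lock.unlock();
            }
        }
    }
}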
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
The class GridDhtTxFinishFuture, method finish.
/**
* @param commit Commit flag.
* @param dhtMap DHT map.
* @param nearMap Near map.
* @return {@code True} in case there is at least one synchronous {@code MiniFuture} to wait for.
*/
private boolean finish(boolean commit,
    Map<UUID, GridDistributedTxMapping> dhtMap,
    Map<UUID, GridDistributedTxMapping> nearMap) {
    if (tx.onePhaseCommit())
        return false;
    boolean sync = tx.syncMode() == FULL_SYNC;
    if (tx.explicitLock())
        sync = true;
    boolean res = false;
    int miniId = 0;
    // Create mini futures.
    for (GridDistributedTxMapping dhtMapping : dhtMap.values()) {
        ClusterNode n = dhtMapping.primary();
        assert !n.isLocal();
        GridDistributedTxMapping nearMapping = nearMap.get(n.id());
        if (dhtMapping.empty() && nearMapping != null && nearMapping.empty())
            // Nothing to send.
            continue;
        MiniFuture fut = new MiniFuture(++miniId, dhtMapping, nearMapping);
        // Append new future.
        add(fut);
        Collection<Long> updCntrs = new ArrayList<>(dhtMapping.entries().size());
        for (IgniteTxEntry e : dhtMapping.entries())
            updCntrs.add(e.updateCounter());
        GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
            tx.nearNodeId(), futId, fut.futureId(), tx.topologyVersion(),
            tx.xidVersion(), tx.commitVersion(), tx.threadId(), tx.isolation(),
            commit, tx.isInvalidate(), tx.system(), tx.ioPolicy(), tx.isSystemInvalidate(),
            sync ? FULL_SYNC : tx.syncMode(),
            tx.completedBase(), tx.committedVersions(), tx.rolledbackVersions(), tx.pendingVersions(),
            tx.size(), tx.subjectId(), tx.taskNameHash(), tx.activeCachesDeploymentEnabled(),
            updCntrs, false, false);
        req.writeVersion(tx.writeVersion() != null ? tx.writeVersion() : tx.xidVersion());
        try {
            cctx.io().send(n, req, tx.ioPolicy());
            if (msgLog.isDebugEnabled()) {
                msgLog.debug("DHT finish fut, sent request dht [txId=" + tx.nearXidVersion() +
                    ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
            }
            if (sync)
                res = true;
            else
                fut.onDone();
        }
        catch (IgniteCheckedException e) {
            // Fail the whole thing.
            if (e instanceof ClusterTopologyCheckedException)
                fut.onNodeLeft((ClusterTopologyCheckedException)e);
            else {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, failed to send request dht [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ", err=" + e + ']');
                }
                fut.onResult(e);
            }
        }
    }
    for (GridDistributedTxMapping nearMapping : nearMap.values()) {
        if (!dhtMap.containsKey(nearMapping.primary().id())) {
            if (nearMapping.empty())
                // Nothing to send.
                continue;
            MiniFuture fut = new MiniFuture(++miniId, null, nearMapping);
            // Append new future.
            add(fut);
            GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
                tx.nearNodeId(), futId, fut.futureId(), tx.topologyVersion(),
                tx.xidVersion(), tx.commitVersion(), tx.threadId(), tx.isolation(),
                commit, tx.isInvalidate(), tx.system(), tx.ioPolicy(), tx.isSystemInvalidate(),
                sync ? FULL_SYNC : tx.syncMode(),
                tx.completedBase(), tx.committedVersions(), tx.rolledbackVersions(), tx.pendingVersions(),
                tx.size(), tx.subjectId(), tx.taskNameHash(), tx.activeCachesDeploymentEnabled(),
                false, false);
            req.writeVersion(tx.writeVersion());
            try {
                cctx.io().send(nearMapping.primary(), req, tx.ioPolicy());
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, sent request near [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
                }
                if (sync)
                    res = true;
                else
                    fut.onDone();
            }
            catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft((ClusterTopologyCheckedException)e);
                else {
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("DHT finish fut, failed to send request near [txId=" + tx.nearXidVersion() +
                            ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() +
                            ", err=" + e + ']');
                    }
                    fut.onResult(e);
                }
            }
        }
    }
    return res;
}
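Whether finish() waits on its mini futures is driven by the transaction synchronization mode: res is only set when syncMode() is FULL_SYNC (or the transaction holds an explicit lock). The sketch below uses only the public API (cache name is illustrative) to show where that mode comes from; with FULL_SYNC a commit does not return until backups acknowledge the finish request, while PRIMARY_SYNC and FULL_ASYNC let it complete without waiting.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

public class SyncModeExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("syncCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
            ccfg.setBackups(1);
            ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);
            try (Transaction tx = ignite.transactions().txStart()) {
                cache.put(1, "value");
                // With FULL_SYNC this blocks until backups acknowledge the finish request.
                tx.commit();
            }
        }
    }
}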
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
The class GridDhtTxLocalAdapter, method addMapping.
/**
* @param mappings Entry mappings.
* @param dst Transaction mappings.
*/
private void addMapping(Map<ClusterNode, List<GridDhtCacheEntry>> mappings, Map<UUID, GridDistributedTxMapping> dst) {
    for (Map.Entry<ClusterNode, List<GridDhtCacheEntry>> mapping : mappings.entrySet()) {
        ClusterNode n = mapping.getKey();
        GridDistributedTxMapping m = dst.get(n.id());
        List<GridDhtCacheEntry> entries = mapping.getValue();
        for (GridDhtCacheEntry entry : entries) {
            IgniteTxEntry txEntry = entry(entry.txKey());
            if (txEntry != null) {
                if (m == null)
                    dst.put(n.id(), m = new GridDistributedTxMapping(n));
                m.add(txEntry);
            }
        }
    }
}
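addMapping simply groups transaction entries by primary node, creating a GridDistributedTxMapping lazily on first use. The standalone sketch below is plain Java with made-up types (not Ignite code) showing the same lazy-grouping idiom with computeIfAbsent.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

class GroupByNodeSketch {
    // Group entries by node id, creating the per-node list only when the first entry arrives.
    static Map<UUID, List<String>> groupByNode(Map<UUID, List<String>> src) {
        Map<UUID, List<String>> dst = new HashMap<>();
        for (Map.Entry<UUID, List<String>> e : src.entrySet())
            for (String entry : e.getValue())
                dst.computeIfAbsent(e.getKey(), id -> new ArrayList<>()).add(entry);
        return dst;
    }
}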
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
The class GridDhtTxLocalAdapter, method lockAllAsync.
/**
 * @param cacheCtx Cache context.
 * @param entries Entries to lock.
 * @param msgId Message ID.
 * @param read Read flag.
 * @param needRetVal Return value flag.
 * @param createTtl TTL for create operation.
 * @param accessTtl TTL for read operation.
 * @param skipStore Skip store flag.
 * @param keepBinary Keep binary flag.
 * @param nearCache {@code True} if near cache enabled on originating node.
 * @return Lock future.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
IgniteInternalFuture<GridCacheReturn> lockAllAsync(
    GridCacheContext cacheCtx,
    List<GridCacheEntryEx> entries,
    long msgId,
    final boolean read,
    final boolean needRetVal,
    long createTtl,
    long accessTtl,
    boolean skipStore,
    boolean keepBinary,
    boolean nearCache) {
    try {
        checkValid();
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }
    final GridCacheReturn ret = new GridCacheReturn(localResult(), false);
    if (F.isEmpty(entries))
        return new GridFinishedFuture<>(ret);
    init();
    onePhaseCommit(onePhaseCommit);
    try {
        Set<KeyCacheObject> skipped = null;
        AffinityTopologyVersion topVer = topologyVersion();
        GridDhtCacheAdapter dhtCache = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();
        // Enlist locks into transaction.
        for (int i = 0; i < entries.size(); i++) {
            GridCacheEntryEx entry = entries.get(i);
            KeyCacheObject key = entry.key();
            IgniteTxEntry txEntry = entry(entry.txKey());
            // First time access.
            if (txEntry == null) {
                GridDhtCacheEntry cached;
                while (true) {
                    try {
                        cached = dhtCache.entryExx(key, topVer);
                        cached.unswap(read);
                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry: " + key);
                    }
                }
                addActiveCache(dhtCache.context(), false);
                txEntry = addEntry(NOOP, null, null, null, cached, null, CU.empty0(), false, -1L, -1L, null,
                    skipStore, keepBinary, nearCache);
                if (read)
                    txEntry.ttl(accessTtl);
                txEntry.cached(cached);
                addReader(msgId, cached, txEntry, topVer);
            }
            else {
                if (skipped == null)
                    skipped = new GridLeanSet<>();
                skipped.add(key);
            }
        }
        assert pessimistic();
        Collection<KeyCacheObject> keys = F.viewReadOnly(entries, CU.entry2Key());
        // Acquire locks only after having added operation to the write set.
        // Otherwise, during rollback we will not know whether locks need
        // to be rolled back.
        // Lose all skipped and previously locked keys (we cannot reenter locks here).
        final Collection<KeyCacheObject> passedKeys = skipped != null ? F.view(keys, F0.notIn(skipped)) : keys;
        if (log.isDebugEnabled())
            log.debug("Lock keys: " + passedKeys);
        return obtainLockAsync(cacheCtx, ret, passedKeys, read, needRetVal, createTtl, accessTtl, skipStore, keepBinary);
    }
    catch (IgniteCheckedException e) {
        setRollbackOnly();
        return new GridFinishedFuture<>(e);
    }
}
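lockAllAsync enlists DHT entries and then acquires pessimistic locks, with createTtl and accessTtl ultimately derived from the caller's expiry policy and the lock wait bounded by the transaction timeout. A comparable scenario expressed through the public API is sketched below (cache name, expiry duration and timeout are illustrative).

import java.util.concurrent.TimeUnit;
import javax.cache.expiry.AccessedExpiryPolicy;
import javax.cache.expiry.Duration;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class PessimisticLockTtlExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("ttlCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
            // Access-based expiry: reads and writes through this facade carry an access TTL.
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg)
                .withExpiryPolicy(new AccessedExpiryPolicy(new Duration(TimeUnit.MINUTES, 5)));
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ, 3_000, 0)) {
                // The exclusive lock for key 7 is acquired on the primary node before the write;
                // if it cannot be acquired within the 3 second timeout, the transaction fails.
                cache.put(7, "locked-then-written");
                tx.commit();
            }
        }
    }
}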