Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method processDhtLockRequest0.
/**
* @param nodeId Node ID.
* @param req Request.
*/
private void processDhtLockRequest0(UUID nodeId, GridDhtLockRequest req) {
assert nodeId != null;
assert req != null;
assert !nodeId.equals(locNodeId);
int cnt = F.size(req.keys());
GridDhtLockResponse res;
GridDhtTxRemote dhtTx = null;
GridNearTxRemote nearTx = null;
boolean fail = false;
boolean cancelled = false;
try {
res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), cnt, ctx.deploymentEnabled());
dhtTx = startRemoteTx(nodeId, req, res);
nearTx = isNearEnabled(cacheCfg) ? near().startRemoteTx(nodeId, req) : null;
if (nearTx != null && !nearTx.empty())
res.nearEvicted(nearTx.evicted());
else {
if (!F.isEmpty(req.nearKeys())) {
Collection<IgniteTxKey> nearEvicted = new ArrayList<>(req.nearKeys().size());
nearEvicted.addAll(F.viewReadOnly(req.nearKeys(), new C1<KeyCacheObject, IgniteTxKey>() {
@Override
public IgniteTxKey apply(KeyCacheObject k) {
return ctx.txKey(k);
}
}));
res.nearEvicted(nearEvicted);
}
}
} catch (IgniteTxRollbackCheckedException e) {
String err = "Failed processing DHT lock request (transaction has been completed): " + req;
U.error(log, err, e);
res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), new IgniteTxRollbackCheckedException(err, e), ctx.deploymentEnabled());
fail = true;
} catch (IgniteCheckedException e) {
String err = "Failed processing DHT lock request: " + req;
U.error(log, err, e);
res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), new IgniteCheckedException(err, e), ctx.deploymentEnabled());
fail = true;
} catch (GridDistributedLockCancelledException ignored) {
// Received lock request for cancelled lock.
if (log.isDebugEnabled())
log.debug("Received lock request for canceled lock (will ignore): " + req);
res = null;
fail = true;
cancelled = true;
}
boolean releaseAll = false;
if (res != null) {
try {
// Reply back to sender.
ctx.io().send(nodeId, res, ctx.ioPolicy());
if (txLockMsgLog.isDebugEnabled()) {
txLockMsgLog.debug("Sent dht lock response [txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
U.warn(txLockMsgLog, "Failed to send dht lock response, node failed [" + "txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']');
fail = true;
releaseAll = true;
} catch (IgniteCheckedException e) {
U.error(txLockMsgLog, "Failed to send dht lock response (lock will not be acquired) [" + "txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']', e);
fail = true;
}
}
if (fail) {
if (dhtTx != null)
dhtTx.rollbackRemoteTx();
// Even though this should never happen, we leave this check for consistency.
if (nearTx != null)
nearTx.rollbackRemoteTx();
List<KeyCacheObject> keys = req.keys();
if (keys != null) {
for (KeyCacheObject key : keys) {
while (true) {
GridDistributedCacheEntry entry = peekExx(key);
try {
if (entry != null) {
// Release all locks because sender node left grid.
if (releaseAll)
entry.removeExplicitNodeLocks(req.nodeId());
else
entry.removeLock(req.version());
}
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Attempted to remove lock on removed entity during during failure " + "handling for dht lock request (will retry): " + entry);
}
}
}
}
if (releaseAll && !cancelled)
U.warn(log, "Sender node left grid in the midst of lock acquisition (locks have been released).");
}
}
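The conversion of req.nearKeys() above is the core IgniteTxKey usage in this method: each raw near-cache key is wrapped into a cache-scoped transaction key before being reported back in the lock response. Below is a minimal, self-contained sketch of that mapping; SimpleTxKey and the cacheId argument are simplified stand-ins for IgniteTxKey and GridCacheContext.txKey(...), not the real Ignite API.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

public class NearKeyMappingSketch {
    /** Simplified stand-in for IgniteTxKey: a raw cache key scoped by its cache ID. */
    record SimpleTxKey(Object key, int cacheId) { }

    /** Mirrors the F.viewReadOnly(...) conversion of req.nearKeys() in processDhtLockRequest0. */
    static Collection<SimpleTxKey> toTxKeys(Collection<?> nearKeys, int cacheId) {
        Collection<SimpleTxKey> nearEvicted = new ArrayList<>(nearKeys.size());

        for (Object k : nearKeys)
            nearEvicted.add(new SimpleTxKey(k, cacheId)); // ctx.txKey(k) in the real code.

        return nearEvicted;
    }

    public static void main(String[] args) {
        // Two near keys reported back as evicted tx keys in the lock response.
        System.out.println(toTxKeys(List.of("k1", "k2"), 42));
    }
}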
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method startRemoteTx.
/**
* @param nodeId Primary node ID.
* @param req Request.
* @param res Response.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
* @throws GridDistributedLockCancelledException If lock has been cancelled.
*/
@SuppressWarnings({ "RedundantTypeArguments" })
@Nullable
private GridDhtTxRemote startRemoteTx(UUID nodeId, GridDhtLockRequest req, GridDhtLockResponse res) throws IgniteCheckedException, GridDistributedLockCancelledException {
List<KeyCacheObject> keys = req.keys();
GridDhtTxRemote tx = null;
int size = F.size(keys);
for (int i = 0; i < size; i++) {
KeyCacheObject key = keys.get(i);
if (key == null)
continue;
IgniteTxKey txKey = ctx.txKey(key);
if (log.isDebugEnabled())
log.debug("Unmarshalled key: " + key);
GridDistributedCacheEntry entry = null;
while (true) {
try {
int part = ctx.affinity().partition(key);
GridDhtLocalPartition locPart = ctx.topology().localPartition(part, req.topologyVersion(), false);
if (locPart == null || !locPart.reserve()) {
if (log.isDebugEnabled())
log.debug("Local partition for given key is already evicted (will add to invalid " + "partition list) [key=" + key + ", part=" + part + ", locPart=" + locPart + ']');
res.addInvalidPartition(part);
// Invalidate key in near cache, if any.
if (isNearEnabled(cacheCfg))
obsoleteNearEntry(key);
break;
}
try {
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
if (tx == null)
tx = ctx.tm().tx(req.version());
if (tx == null) {
tx = new GridDhtTxRemote(ctx.shared(), req.nodeId(), req.futureId(), nodeId, req.nearXidVersion(), req.topologyVersion(), req.version(), /*commitVer*/null, ctx.systemTx(), ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.isInvalidate(), req.timeout(), req.txSize(), req.subjectId(), req.taskNameHash(), !req.skipStore() && req.storeUsed());
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !ctx.tm().onStarted(tx))
throw new IgniteTxRollbackCheckedException("Failed to acquire lock (transaction " + "has been completed) [ver=" + req.version() + ", tx=" + tx + ']');
}
tx.addWrite(ctx, NOOP, txKey, null, null, req.accessTtl(), req.skipStore(), req.keepBinary());
}
entry = entryExx(key, req.topologyVersion());
// Add remote candidate before reordering.
entry.addRemote(req.nodeId(), nodeId, req.threadId(), req.version(), tx != null, tx != null && tx.implicitSingle(), null);
// Invalidate key in near cache, if any.
if (isNearEnabled(cacheCfg) && req.invalidateNearEntry(i))
invalidateNearEntry(key, req.version());
// Get entry info after candidate is added.
if (req.needPreloadKey(i)) {
entry.unswap();
GridCacheEntryInfo info = entry.info();
if (info != null && !info.isNew() && !info.isDeleted())
res.addPreloadEntry(info);
}
// Double-check in case if sender node left the grid.
if (ctx.discovery().node(req.nodeId()) == null) {
if (log.isDebugEnabled())
log.debug("Node requesting lock left grid (lock request will be ignored): " + req);
entry.removeLock(req.version());
if (tx != null) {
tx.clearEntry(txKey);
// The requester left, so this lock is never acquired; if the tx already reached COMMITTING state, force-commit it, otherwise roll it back.
if (tx.state() == COMMITTING)
tx.forceCommit();
else
tx.rollbackRemoteTx();
}
return null;
}
// Entry is legit.
break;
} finally {
locPart.release();
}
} catch (GridDhtInvalidPartitionException e) {
if (log.isDebugEnabled())
log.debug("Received invalid partition exception [e=" + e + ", req=" + req + ']');
res.addInvalidPartition(e.partition());
// Invalidate key in near cache, if any.
if (isNearEnabled(cacheCfg))
obsoleteNearEntry(key);
if (tx != null) {
tx.clearEntry(txKey);
if (log.isDebugEnabled())
log.debug("Cleared invalid entry from remote transaction (will skip) [entry=" + entry + ", tx=" + tx + ']');
}
break;
} catch (GridCacheEntryRemovedException ignored) {
assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;
if (log.isDebugEnabled())
log.debug("Received entry removed exception (will retry on renewed entry): " + entry);
if (tx != null) {
tx.clearEntry(txKey);
if (log.isDebugEnabled())
log.debug("Cleared removed entry from remote transaction (will retry) [entry=" + entry + ", tx=" + tx + ']');
}
}
}
}
if (tx != null && tx.empty()) {
if (log.isDebugEnabled())
log.debug("Rolling back remote DHT transaction because it is empty [req=" + req + ", res=" + res + ']');
tx.rollbackRemoteTx();
tx = null;
}
return tx;
}
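startRemoteTx follows a recurring Ignite pattern: per-key work is tracked by IgniteTxKey, and whenever GridCacheEntryRemovedException is raised the key is cleared from the transaction and retried on a renewed entry. A minimal sketch of that retry shape, using hypothetical stand-in types (CacheEntry, TxState, EntryRemovedException) rather than the Ignite internals:

import java.util.Map;

public class LockRetrySketch {
    /** Stand-in for GridCacheEntryRemovedException. */
    static class EntryRemovedException extends Exception { }

    /** Stand-in for a distributed cache entry that can observe concurrent removal. */
    interface CacheEntry {
        void addRemoteCandidate(long lockVer) throws EntryRemovedException;
    }

    /** Stand-in for the remote transaction, which tracks work by tx key. */
    interface TxState {
        void clearEntry(Object txKey); // tx.clearEntry(txKey) in the real code.
    }

    static void lockKey(Map<Object, CacheEntry> entries, TxState tx, Object txKey, long lockVer) {
        while (true) {
            // entryExx(key, topVer) in the real code; a renewed entry is obtained on each retry.
            CacheEntry entry = entries.get(txKey);

            try {
                if (entry != null)
                    entry.addRemoteCandidate(lockVer); // May observe a concurrently removed entry.

                break; // Entry is legit.
            }
            catch (EntryRemovedException ignored) {
                tx.clearEntry(txKey); // Drop the stale entry from the tx and retry.
            }
        }
    }
}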
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
The class GridNearTxLocal, method enlistWriteEntry.
/**
* @param cacheCtx Cache context.
* @param entryTopVer Entry topology version ({@code null} to use the current transaction topology version).
* @param cacheKey Key.
* @param val Value.
* @param entryProcessor Entry processor.
* @param invokeArgs Optional arguments for EntryProcessor.
* @param expiryPlc Explicitly specified expiry policy for entry.
* @param retval Return value flag.
* @param lockOnly Lock only flag.
* @param filter Filter.
* @param drVer DR version.
* @param drTtl DR ttl.
* @param drExpireTime DR expire time.
* @param ret Return value.
* @param enlisted Enlisted keys collection.
* @param skipStore Skip store flag.
* @param singleRmv {@code True} for single remove operation.
* @param hasFilters {@code True} if filters not empty.
* @param needVal {@code True} if value is needed.
* @param needReadVer {@code True} if need read entry version.
* @param keepBinary Keep binary flag.
* @param recovery Recovery flag.
* @return {@code True} if entry value should be loaded.
* @throws IgniteCheckedException If failed.
*/
private boolean enlistWriteEntry(GridCacheContext cacheCtx, @Nullable AffinityTopologyVersion entryTopVer, final KeyCacheObject cacheKey, @Nullable final Object val, @Nullable final EntryProcessor<?, ?, ?> entryProcessor, @Nullable final Object[] invokeArgs, @Nullable final ExpiryPolicy expiryPlc, final boolean retval, final boolean lockOnly, final CacheEntryPredicate[] filter, final GridCacheVersion drVer, final long drTtl, long drExpireTime, final GridCacheReturn ret, @Nullable final Collection<KeyCacheObject> enlisted, boolean skipStore, boolean singleRmv, boolean hasFilters, final boolean needVal, boolean needReadVer, boolean keepBinary, boolean recovery) throws IgniteCheckedException {
boolean loadMissed = false;
final boolean rmv = val == null && entryProcessor == null;
IgniteTxKey txKey = cacheCtx.txKey(cacheKey);
IgniteTxEntry txEntry = entry(txKey);
// First time access.
if (txEntry == null) {
while (true) {
GridCacheEntryEx entry = entryEx(cacheCtx, txKey, entryTopVer != null ? entryTopVer : topologyVersion());
try {
entry.unswap(false);
// Check if lock is being explicitly acquired by the same thread.
if (!implicit && cctx.kernalContext().config().isCacheSanityCheckEnabled() && entry.lockedByThread(threadId, xidVer)) {
throw new IgniteCheckedException("Cannot access key within transaction if lock is " + "externally held [key=" + CU.value(cacheKey, cacheCtx, false) + ", entry=" + entry + ", xidVer=" + xidVer + ", threadId=" + threadId + ", locNodeId=" + cctx.localNodeId() + ']');
}
CacheObject old = null;
GridCacheVersion readVer = null;
if (optimistic() && !implicit()) {
try {
if (needReadVer) {
EntryGetResult res = primaryLocal(entry) ? entry.innerGetVersioned(null, this, /*metrics*/retval, /*events*/retval, CU.subjectId(this, cctx), entryProcessor, resolveTaskName(), null, keepBinary, null) : null;
if (res != null) {
old = res.value();
readVer = res.version();
}
} else {
old = entry.innerGet(null, this, /*read through*/false, /*metrics*/retval, /*events*/retval, CU.subjectId(this, cctx), entryProcessor, resolveTaskName(), null, keepBinary);
}
} catch (ClusterTopologyCheckedException e) {
entry.context().evicts().touch(entry, topologyVersion());
throw e;
}
} else
old = entry.rawGet();
final GridCacheOperation op = lockOnly ? NOOP : rmv ? DELETE : entryProcessor != null ? TRANSFORM : old != null ? UPDATE : CREATE;
if (old != null && hasFilters && !filter(entry.context(), cacheKey, old, filter)) {
ret.set(cacheCtx, old, false, keepBinary);
if (!readCommitted()) {
if (optimistic() && serializable()) {
txEntry = addEntry(op, old, entryProcessor, invokeArgs, entry, expiryPlc, filter, true, drTtl, drExpireTime, drVer, skipStore, keepBinary, CU.isNearEnabled(cacheCtx));
} else {
txEntry = addEntry(READ, old, null, null, entry, null, CU.empty0(), false, -1L, -1L, null, skipStore, keepBinary, CU.isNearEnabled(cacheCtx));
}
txEntry.markValid();
if (needReadVer) {
assert readVer != null;
txEntry.entryReadVersion(singleRmv ? SER_READ_NOT_EMPTY_VER : readVer);
}
}
if (readCommitted())
cacheCtx.evicts().touch(entry, topologyVersion());
// While.
break;
}
CacheObject cVal = cacheCtx.toCacheObject(val);
if (op == CREATE || op == UPDATE)
cacheCtx.validateKeyAndValue(cacheKey, cVal);
txEntry = addEntry(op, cVal, entryProcessor, invokeArgs, entry, expiryPlc, filter, true, drTtl, drExpireTime, drVer, skipStore, keepBinary, CU.isNearEnabled(cacheCtx));
if (enlisted != null)
enlisted.add(cacheKey);
if (!pessimistic() && !implicit()) {
txEntry.markValid();
if (old == null) {
if (needVal)
loadMissed = true;
else {
assert !implicit() || !transform : this;
assert txEntry.op() != TRANSFORM : txEntry;
if (retval)
ret.set(cacheCtx, null, true, keepBinary);
else
ret.success(true);
}
} else {
if (needReadVer) {
assert readVer != null;
txEntry.entryReadVersion(singleRmv ? SER_READ_NOT_EMPTY_VER : readVer);
}
if (retval && !transform)
ret.set(cacheCtx, old, true, keepBinary);
else {
if (txEntry.op() == TRANSFORM) {
GridCacheVersion ver;
try {
ver = entry.version();
} catch (GridCacheEntryRemovedException ex) {
assert optimistic() : txEntry;
if (log.isDebugEnabled())
log.debug("Failed to get entry version " + "[err=" + ex.getMessage() + ']');
ver = null;
}
addInvokeResult(txEntry, old, ret, ver);
} else
ret.success(true);
}
}
} else {
// Pessimistic.
if (retval && !transform)
ret.set(cacheCtx, old, true, keepBinary);
else
ret.success(true);
}
// While.
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry in transaction putAll0 method: " + entry);
}
}
} else {
if (entryProcessor == null && txEntry.op() == TRANSFORM)
throw new IgniteCheckedException("Failed to enlist write value for key (cannot have update value in " + "transaction after EntryProcessor is applied): " + CU.value(cacheKey, cacheCtx, false));
GridCacheEntryEx entry = txEntry.cached();
CacheObject v = txEntry.value();
boolean del = txEntry.op() == DELETE && rmv;
if (!del) {
if (hasFilters && !filter(entry.context(), cacheKey, v, filter)) {
ret.set(cacheCtx, v, false, keepBinary);
return loadMissed;
}
GridCacheOperation op = rmv ? DELETE : entryProcessor != null ? TRANSFORM : v != null ? UPDATE : CREATE;
CacheObject cVal = cacheCtx.toCacheObject(val);
if (op == CREATE || op == UPDATE)
cacheCtx.validateKeyAndValue(cacheKey, cVal);
txEntry = addEntry(op, cVal, entryProcessor, invokeArgs, entry, expiryPlc, filter, true, drTtl, drExpireTime, drVer, skipStore, keepBinary, CU.isNearEnabled(cacheCtx));
if (enlisted != null)
enlisted.add(cacheKey);
if (txEntry.op() == TRANSFORM) {
GridCacheVersion ver;
try {
ver = entry.version();
} catch (GridCacheEntryRemovedException e) {
assert optimistic() : txEntry;
if (log.isDebugEnabled())
log.debug("Failed to get entry version: [msg=" + e.getMessage() + ']');
ver = null;
}
addInvokeResult(txEntry, txEntry.value(), ret, ver);
}
}
if (!pessimistic()) {
txEntry.markValid();
if (retval && !transform)
ret.set(cacheCtx, v, true, keepBinary);
else
ret.success(true);
}
}
return loadMissed;
}
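The first thing enlistWriteEntry does is translate the raw KeyCacheObject into an IgniteTxKey and look up an existing IgniteTxEntry, which is what splits the method into its first-time-access and already-enlisted branches. The sketch below shows only that lookup shape with simplified stand-in types (TxKey, TxEntry); it illustrates the keying scheme, not the actual transaction implementation.

import java.util.HashMap;
import java.util.Map;

public class EnlistSketch {
    /** Simplified stand-in for IgniteTxKey: a raw cache key scoped by its cache ID. */
    record TxKey(int cacheId, Object key) { }

    /** Simplified stand-in for IgniteTxEntry. */
    static class TxEntry {
        Object val;

        TxEntry(Object val) { this.val = val; }
    }

    /** Write set keyed by tx key, mirroring the entry(txKey) lookups in GridNearTxLocal. */
    private final Map<TxKey, TxEntry> writeMap = new HashMap<>();

    /** Returns {@code true} on first-time access for the key, {@code false} if it was already enlisted. */
    boolean enlistWrite(int cacheId, Object key, Object val) {
        TxKey txKey = new TxKey(cacheId, key); // cacheCtx.txKey(cacheKey) in the real code.
        TxEntry txEntry = writeMap.get(txKey); // entry(txKey) in the real code.

        if (txEntry == null) {
            writeMap.put(txKey, new TxEntry(val)); // First time access: add a new tx entry.
            return true;
        }

        txEntry.val = val; // Key already enlisted: update the existing tx entry in place.
        return false;
    }
}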
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
The class GridNearTxPrepareFutureAdapter, method onPrepareResponse.
/**
* @param m Mapping.
* @param res Response.
* @param updateMapping Update mapping flag.
*/
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
final void onPrepareResponse(GridDistributedTxMapping m, GridNearTxPrepareResponse res, boolean updateMapping) {
if (res == null)
return;
assert res.error() == null : res;
if (tx.onePhaseCommit() && !res.onePhaseCommit())
tx.onePhaseCommit(false);
UUID nodeId = m.primary().id();
for (Map.Entry<IgniteTxKey, CacheVersionedValue> entry : res.ownedValues().entrySet()) {
IgniteTxEntry txEntry = tx.entry(entry.getKey());
assert txEntry != null;
GridCacheContext cacheCtx = txEntry.context();
while (true) {
try {
if (cacheCtx.isNear()) {
GridNearCacheEntry nearEntry = (GridNearCacheEntry) txEntry.cached();
CacheVersionedValue tup = entry.getValue();
nearEntry.resetFromPrimary(tup.value(), tx.xidVersion(), tup.version(), nodeId, tx.topologyVersion());
} else if (txEntry.cached().detached()) {
GridDhtDetachedCacheEntry detachedEntry = (GridDhtDetachedCacheEntry) txEntry.cached();
CacheVersionedValue tup = entry.getValue();
detachedEntry.resetFromPrimary(tup.value(), tx.xidVersion());
}
break;
} catch (GridCacheEntryRemovedException ignored) {
// Retry.
txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion()));
}
}
}
tx.implicitSingleResult(res.returnValue());
for (IgniteTxKey key : res.filterFailedKeys()) {
IgniteTxEntry txEntry = tx.entry(key);
assert txEntry != null : "Missing tx entry for write key: " + key;
txEntry.op(NOOP);
assert txEntry.context() != null;
ExpiryPolicy expiry = txEntry.context().expiryForTxEntry(txEntry);
if (expiry != null)
txEntry.ttl(CU.toTtl(expiry.getExpiryForAccess()));
}
if (!m.empty()) {
// This step is very important as near and DHT versions grow separately.
cctx.versions().onReceived(nodeId, res.dhtVersion());
if (updateMapping && m.hasNearCacheEntries()) {
GridCacheVersion writeVer = res.writeVersion();
if (writeVer == null)
writeVer = res.dhtVersion();
// Register DHT version.
m.dhtVersion(res.dhtVersion(), writeVer);
GridDistributedTxMapping map = tx.mappings().get(nodeId);
if (map != null)
map.dhtVersion(res.dhtVersion(), writeVer);
tx.readyNearLocks(m, res.pending(), res.committedVersions(), res.rolledbackVersions());
}
}
}
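A condensed sketch of the two IgniteTxKey-driven loops above: values already owned by the primary are applied to the matching transaction entries, and keys that failed the filter are downgraded to no-ops. All types here (TxKey, TxEntry) are simplified stand-ins for the Ignite classes, not the real API.

import java.util.Collection;
import java.util.Map;

public class PrepareResponseSketch {
    /** Simplified stand-in for IgniteTxKey. */
    record TxKey(int cacheId, Object key) { }

    /** Simplified stand-in for IgniteTxEntry. */
    static class TxEntry {
        Object val;
        boolean noop;
    }

    static void onPrepareResponse(Map<TxKey, TxEntry> txEntries,
        Map<TxKey, Object> ownedValues, Collection<TxKey> filterFailedKeys) {
        // Apply values the primary already owns to the matching tx entries.
        for (Map.Entry<TxKey, Object> e : ownedValues.entrySet()) {
            TxEntry txEntry = txEntries.get(e.getKey()); // tx.entry(entry.getKey()) in the real code.

            if (txEntry != null)
                txEntry.val = e.getValue(); // resetFromPrimary(...) in the real code.
        }

        // Keys that failed the filter become no-ops for this transaction.
        for (TxKey key : filterFailedKeys) {
            TxEntry txEntry = txEntries.get(key); // tx.entry(key) in the real code.

            if (txEntry != null)
                txEntry.noop = true; // txEntry.op(NOOP) in the real code.
        }
    }
}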
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
The class GridNearTxPrepareResponse, method prepareMarshal.
/**
* {@inheritDoc}
* @param ctx Shared cache context.
*/
@Override
public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
super.prepareMarshal(ctx);
if (ownedVals != null && ownedValKeys == null) {
ownedValKeys = ownedVals.keySet();
ownedValVals = ownedVals.values();
for (Map.Entry<IgniteTxKey, CacheVersionedValue> entry : ownedVals.entrySet()) {
GridCacheContext cacheCtx = ctx.cacheContext(entry.getKey().cacheId());
entry.getKey().prepareMarshal(cacheCtx);
entry.getValue().prepareMarshal(cacheCtx.cacheObjectContext());
}
}
if (retVal != null && retVal.cacheId() != 0) {
GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());
assert cctx != null : retVal.cacheId();
retVal.prepareMarshal(cctx);
}
if (filterFailedKeys != null) {
for (IgniteTxKey key : filterFailedKeys) {
GridCacheContext cctx = ctx.cacheContext(key.cacheId());
key.prepareMarshal(cctx);
}
}
}
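prepareMarshal shows why IgniteTxKey carries its cache ID: the owning cache context is resolved per key before the key (and its value) is prepared for marshalling. The following sketch captures only that shape, with a hypothetical Marshallable interface and ctxById resolver standing in for IgniteTxKey and GridCacheSharedContext.cacheContext(int).

import java.util.Collection;
import java.util.function.IntFunction;

public class PrepareMarshalSketch {
    /** Stand-in for a key that knows its cache and can prepare itself for marshalling. */
    interface Marshallable {
        int cacheId();

        void prepareMarshal(Object cacheCtx) throws Exception; // IgniteTxKey.prepareMarshal(GridCacheContext) in the real code.
    }

    /** Resolve the owning cache context per key, then prepare each key for marshalling. */
    static void prepareMarshal(Collection<? extends Marshallable> keys, IntFunction<Object> ctxById) throws Exception {
        for (Marshallable key : keys) {
            Object cacheCtx = ctxById.apply(key.cacheId()); // ctx.cacheContext(key.cacheId()) in the real code.

            key.prepareMarshal(cacheCtx);
        }
    }
}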