Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridDhtTxPrepareFuture, method sendPrepareRequests.
/**
 * Sends DHT and near prepare requests for all remote transaction mappings.
 */
private void sendPrepareRequests() {
assert !tx.txState().mvccEnabled() || !tx.onePhaseCommit() || tx.mvccSnapshot() != null;
int miniId = 0;
assert tx.transactionNodes() != null;
final long timeout = timeoutObj != null ? timeoutObj.timeout : 0;
// No need to process active transactions on backups.
MvccSnapshot mvccSnapshot = tx.mvccSnapshot();
if (mvccSnapshot != null)
mvccSnapshot = mvccSnapshot.withoutActiveTransactions();
// Create mini futures.
for (GridDistributedTxMapping dhtMapping : tx.dhtMap().values()) {
assert !dhtMapping.empty() || dhtMapping.queryUpdate();
ClusterNode n = dhtMapping.primary();
assert !n.isLocal();
GridDistributedTxMapping nearMapping = tx.nearMap().get(n.id());
Collection<IgniteTxEntry> nearWrites = nearMapping == null ? null : nearMapping.writes();
Collection<IgniteTxEntry> dhtWrites = dhtMapping.writes();
if (!dhtMapping.queryUpdate() && F.isEmpty(dhtWrites) && F.isEmpty(nearWrites))
continue;
MiniFuture fut = new MiniFuture(n.id(), ++miniId, dhtMapping, nearMapping);
// Append new future.
add(fut);
assert this.req.transactionNodes() != null; // 'req' here is the near prepare request field; the local 'req' is declared below.
GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(
    futId,
    fut.futureId(),
    tx.topologyVersion(),
    tx,
    timeout,
    dhtWrites,
    nearWrites,
    this.req.transactionNodes(),
    tx.nearXidVersion(),
    true,
    tx.onePhaseCommit(),
    tx.taskNameHash(),
    tx.activeCachesDeploymentEnabled(),
    tx.storeWriteThrough(),
    retVal,
    mvccSnapshot,
    cctx.tm().txHandler().filterUpdateCountersForBackupNode(tx, n));
req.queryUpdate(dhtMapping.queryUpdate());
int idx = 0;
for (IgniteTxEntry entry : dhtWrites) {
try {
GridDhtCacheEntry cached = (GridDhtCacheEntry) entry.cached();
GridCacheContext<?, ?> cacheCtx = cached.context();
// Do not invalidate near entry on originating transaction node.
req.invalidateNearEntry(idx, !tx.nearNodeId().equals(n.id()) && cached.readerId(n.id()) != null);
if (cached.isNewLocked()) {
List<ClusterNode> owners = cacheCtx.topology().owners(cached.partition(), tx != null ? tx.topologyVersion() : cacheCtx.affinity().affinityTopologyVersion());
// Do not preload if local node is a partition owner.
if (!owners.contains(cctx.localNode()))
req.markKeyForPreload(idx);
}
} catch (GridCacheEntryRemovedException e) {
log.error("Got removed exception on entry with dht local candidate. Transaction will be " + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e);
// Entry was unlocked by concurrent rollback: stop processing and roll back.
onError(tx.rollbackException());
break;
}
idx++;
}
if (!F.isEmpty(nearWrites)) {
for (IgniteTxEntry entry : nearWrites) {
try {
if (entry.explicitVersion() == null) {
GridCacheMvccCandidate added = entry.cached().candidate(version());
assert added != null : "Missing candidate for cache entry: " + entry;
assert added.dhtLocal();
if (added.ownerVersion() != null)
req.owned(entry.txKey(), added.ownerVersion());
}
} catch (GridCacheEntryRemovedException e) {
log.error("Got removed exception on entry with dht local candidate. Transaction will be " + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e);
// Entry was unlocked by concurrent rollback: stop processing and roll back.
onError(tx.rollbackException());
break;
}
}
}
assert req.transactionNodes() != null;
try {
cctx.io().send(n, req, tx.ioPolicy());
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, sent request dht [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
fut.onNodeLeft();
} catch (IgniteCheckedException e) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request dht [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
}
fut.onResult(e);
}
}
for (GridDistributedTxMapping nearMapping : tx.nearMap().values()) {
if (!tx.dhtMap().containsKey(nearMapping.primary().id())) {
if (tx.remainingTime() == -1)
return;
MiniFuture fut = new MiniFuture(nearMapping.primary().id(), ++miniId, null, nearMapping);
// Append new future.
add(fut);
GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(
    futId,
    fut.futureId(),
    tx.topologyVersion(),
    tx,
    timeout,
    null,
    nearMapping.writes(),
    tx.transactionNodes(),
    tx.nearXidVersion(),
    true,
    tx.onePhaseCommit(),
    tx.taskNameHash(),
    tx.activeCachesDeploymentEnabled(),
    tx.storeWriteThrough(),
    retVal,
    mvccSnapshot,
    null);
for (IgniteTxEntry entry : nearMapping.entries()) {
if (CU.writes().apply(entry)) {
try {
if (entry.explicitVersion() == null) {
GridCacheMvccCandidate added = entry.cached().candidate(version());
assert added != null : "Null candidate for non-group-lock entry " + "[added=" + added + ", entry=" + entry + ']';
assert added.dhtLocal() : "Got non-dht-local candidate for prepare future " + "[added=" + added + ", entry=" + entry + ']';
if (added != null && added.ownerVersion() != null)
req.owned(entry.txKey(), added.ownerVersion());
}
} catch (GridCacheEntryRemovedException e) {
log.error("Got removed exception on entry with dht local candidate. Transaction will be " + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e);
// Entry was unlocked by concurrent rollback: stop processing and roll back.
onError(tx.rollbackException());
break;
}
}
}
assert req.transactionNodes() != null;
try {
cctx.io().send(nearMapping.primary(), req, tx.ioPolicy());
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, sent request near [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
fut.onNodeLeft();
} catch (IgniteCheckedException e) {
if (!cctx.kernalContext().isStopping()) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request near [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
}
fut.onResult(e);
} else {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request near, ignore [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ", err=" + e + ']');
}
}
}
}
}
}
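The method above fans the prepare step out into one MiniFuture per remote node: each mini future is registered via add(fut) before its request is sent, so exactly one tracked slot is completed by a response, a send failure, or a node-left event. Below is a minimal, hypothetical sketch of that fan-out pattern using only JDK types; PrepareFanOut, Transport, and the message payloads are illustrative stand-ins, not Ignite classes.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

class PrepareFanOut {
    /** Stand-in for cctx.io().send(node, req, plc). */
    interface Transport {
        void send(UUID nodeId, Object msg) throws Exception;
    }

    /** Mini futures keyed by mini ID, like the MiniFuture instances above. */
    private final Map<Integer, CompletableFuture<Void>> minis = new ConcurrentHashMap<>();

    private final AtomicInteger miniIdGen = new AtomicInteger();

    /** Completes when every per-node mini future completes. */
    CompletableFuture<Void> sendToAll(Iterable<UUID> nodes, Transport io) {
        for (UUID nodeId : nodes) {
            int miniId = miniIdGen.incrementAndGet();

            CompletableFuture<Void> mini = new CompletableFuture<>();

            // Register before sending, mirroring add(fut) above, so a racing
            // response can always find its slot.
            minis.put(miniId, mini);

            try {
                io.send(nodeId, "prepare-" + miniId);
            }
            catch (Exception e) {
                mini.completeExceptionally(e); // Mirrors fut.onResult(e) / fut.onNodeLeft().
            }
        }

        return CompletableFuture.allOf(minis.values().toArray(new CompletableFuture[0]));
    }

    /** Invoked by the message listener when a prepare response arrives. */
    void onResponse(int miniId) {
        CompletableFuture<Void> mini = minis.remove(miniId);

        if (mini != null)
            mini.complete(null);
    }
}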
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method removeLocks.
/**
* @param nodeId Node ID.
* @param ver Version.
* @param keys Keys.
* @param unmap Flag for un-mapping version.
*/
public void removeLocks(UUID nodeId, GridCacheVersion ver, Iterable<KeyCacheObject> keys, boolean unmap) {
assert nodeId != null;
assert ver != null;
if (F.isEmpty(keys))
return;
// Remove mapped versions.
GridCacheVersion dhtVer = unmap ? ctx.mvcc().unmapVersion(ver) : ver;
ctx.mvcc().addRemoved(ctx, ver);
Map<ClusterNode, List<KeyCacheObject>> dhtMap = new HashMap<>();
Map<ClusterNode, List<KeyCacheObject>> nearMap = new HashMap<>();
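// Obsolete version is allocated lazily below, only when an entry created just for this lookup must be discarded.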
GridCacheVersion obsoleteVer = null;
for (KeyCacheObject key : keys) {
while (true) {
boolean created = false;
GridDhtCacheEntry entry = peekExx(key);
if (entry == null) {
entry = entryExx(key);
created = true;
}
try {
GridCacheMvccCandidate cand = null;
if (dhtVer == null) {
cand = entry.localCandidateByNearVersion(ver, true);
if (cand != null)
dhtVer = cand.version();
else {
if (log.isDebugEnabled())
log.debug("Failed to locate lock candidate based on dht or near versions [nodeId=" + nodeId + ", ver=" + ver + ", unmap=" + unmap + ", keys=" + keys + ']');
entry.removeLock(ver);
if (created) {
if (obsoleteVer == null)
obsoleteVer = nextVersion();
if (entry.markObsolete(obsoleteVer))
removeEntry(entry);
}
break;
}
}
if (cand == null)
cand = entry.candidate(dhtVer);
AffinityTopologyVersion topVer = cand == null ? AffinityTopologyVersion.NONE : cand.topologyVersion();
// Note that we obtain readers before the lock is removed.
// Even if the entry is removed just after the lock is released,
// we must still send release messages to backups and readers.
Collection<UUID> readers = entry.readers();
// Try to remove the lock for the version we are about to release.
if (entry.removeLock(dhtVer)) {
// Map to backups and near readers.
map(nodeId, topVer, entry, readers, dhtMap, nearMap);
if (log.isDebugEnabled())
log.debug("Removed lock [lockId=" + ver + ", key=" + key + ']');
} else if (log.isDebugEnabled())
log.debug("Received unlock request for unknown candidate " + "(added to cancelled locks set) [ver=" + ver + ", entry=" + entry + ']');
if (created && entry.markObsolete(dhtVer))
removeEntry(entry);
entry.touch();
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Received remove lock request for removed entry (will retry): " + entry);
}
}
}
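// Completed (committed / rolled back) versions are piggybacked on the unlock requests sent below.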
IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);
Collection<GridCacheVersion> committed = versPair.get1();
Collection<GridCacheVersion> rolledback = versPair.get2();
// Backups.
for (Map.Entry<ClusterNode, List<KeyCacheObject>> entry : dhtMap.entrySet()) {
ClusterNode n = entry.getKey();
List<KeyCacheObject> keyBytes = entry.getValue();
GridDhtUnlockRequest req = new GridDhtUnlockRequest(ctx.cacheId(), keyBytes.size(), ctx.deploymentEnabled());
req.version(dhtVer);
try {
for (KeyCacheObject key : keyBytes)
req.addKey(key, ctx);
keyBytes = nearMap.get(n);
if (keyBytes != null)
for (KeyCacheObject key : keyBytes)
req.addNearKey(key);
req.completedVersions(committed, rolledback);
ctx.io().send(n, req, ctx.ioPolicy());
} catch (ClusterTopologyCheckedException ignore) {
if (log.isDebugEnabled())
log.debug("Node left while sending unlock request: " + n);
} catch (IgniteCheckedException e) {
U.error(log, "Failed to send unlock request to node (will make best effort to complete): " + n, e);
}
}
// Readers.
for (Map.Entry<ClusterNode, List<KeyCacheObject>> entry : nearMap.entrySet()) {
ClusterNode n = entry.getKey();
if (!dhtMap.containsKey(n)) {
List<KeyCacheObject> keyBytes = entry.getValue();
GridDhtUnlockRequest req = new GridDhtUnlockRequest(ctx.cacheId(), keyBytes.size(), ctx.deploymentEnabled());
req.version(dhtVer);
try {
for (KeyCacheObject key : keyBytes)
req.addNearKey(key);
req.completedVersions(committed, rolledback);
ctx.io().send(n, req, ctx.ioPolicy());
} catch (ClusterTopologyCheckedException ignore) {
if (log.isDebugEnabled())
log.debug("Node left while sending unlock request: " + n);
} catch (IgniteCheckedException e) {
U.error(log, "Failed to send unlock request to node (will make best effort to complete): " + n, e);
}
}
}
}
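removeLocks batches its network traffic: keys are first grouped per destination node into dhtMap (backups) and nearMap (near readers), and then a single GridDhtUnlockRequest per node carries all of that node's keys plus the completed-version info. The grouping step reduces to the sketch below; primary is a hypothetical affinity lookup and String stands in for KeyCacheObject.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Function;

class UnlockBatcher {
    /** Groups keys by the node that should receive their unlock message. */
    static Map<UUID, List<String>> groupByNode(Iterable<String> keys, Function<String, UUID> primary) {
        Map<UUID, List<String>> byNode = new HashMap<>();

        for (String key : keys)
            byNode.computeIfAbsent(primary.apply(key), n -> new ArrayList<>()).add(key);

        // One unlock request per map entry, as in the dhtMap/nearMap loops above.
        return byNode;
    }
}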
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridDhtColocatedLockFuture, method addEntry.
/**
* Adds entry to future.
*
* @param entry Entry to add.
* @return Non-reentry candidate if the lock should be acquired on a remote node,
* reentry candidate if the lock has already been acquired, and {@code null} if an explicit
* lock is held and an implicit transaction accesses the locked entry.
* @throws IgniteCheckedException If failed to add entry due to external locking.
*/
@Nullable
private GridCacheMvccCandidate addEntry(GridDistributedCacheEntry entry) throws IgniteCheckedException {
IgniteTxKey txKey = entry.txKey();
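// Check whether the current thread already holds an explicit lock on this key.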
GridCacheMvccCandidate cand = cctx.mvcc().explicitLock(threadId, txKey);
if (inTx()) {
if (cand != null) {
if (!tx.implicit())
throw new IgniteCheckedException("Cannot access key within transaction if lock is " + "externally held [key=" + entry.key() + ", entry=" + entry + ']');
else
return null;
} else {
IgniteTxEntry txEntry = tx.entry(txKey);
assert txEntry != null;
txEntry.cached(entry);
// Check transaction entries (corresponding tx entries must be enlisted in transaction).
cand = new GridCacheMvccCandidate(
    entry,
    cctx.localNodeId(),
    null,
    null,
    threadId,
    lockVer,
    true,
    txEntry.locked(),
    inTx(),
    inTx() && tx.implicitSingle(),
    false,
    false,
    null,
    false);
cand.topologyVersion(topVer);
}
} else {
if (cand == null) {
cand = new GridCacheMvccCandidate(
    entry,
    cctx.localNodeId(),
    null,
    null,
    threadId,
    lockVer,
    true,
    false,
    inTx(),
    inTx() && tx.implicitSingle(),
    false,
    false,
    null,
    false);
cand.topologyVersion(topVer);
} else
cand = cand.reenter();
cctx.mvcc().addExplicitLock(threadId, cand, topVer);
}
return cand;
}
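The IgniteCheckedException branch in addEntry is the guard a user hits when mixing explicit locks with explicit transactions on the same key. A minimal sketch against the public Ignite API (cache name and key are arbitrary; the exception may reach the caller wrapped in a CacheException):

import java.util.concurrent.locks.Lock;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

public class ExplicitLockVsTx {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("test");

            // Explicit locks require a transactional cache.
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Lock lock = cache.lock(1);

            lock.lock(); // Hold the key lock externally.

            try (Transaction tx = ignite.transactions().txStart()) {
                // Expected to fail: "Cannot access key within transaction if
                // lock is externally held ..." surfaces from addEntry above.
                cache.put(1, "v");

                tx.commit();
            }
            finally {
                lock.unlock();
            }
        }
    }
}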
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridDhtColocatedLockFuture, method addLocalKey.
/**
* Adds local key future.
*
* @param key Key to add.
* @param topVer Topology version.
* @param distributedKeys Collection of keys that need to be locked.
* @return {@code True} if transaction accesses key that was explicitly locked before.
* @throws IgniteCheckedException If lock is externally held and transaction is explicit.
*/
private boolean addLocalKey(KeyCacheObject key, AffinityTopologyVersion topVer, Collection<KeyCacheObject> distributedKeys) throws IgniteCheckedException {
GridDistributedCacheEntry entry = cctx.colocated().entryExx(key, topVer, false);
assert !entry.detached();
if (!cctx.isAll(entry, filter)) {
if (log.isDebugEnabled())
log.debug("Entry being locked did not pass filter (will not lock): " + entry);
onComplete(false, false);
return false;
}
GridCacheMvccCandidate cand = addEntry(entry);
if (cand != null && !cand.reentry())
distributedKeys.add(key);
return inTx() && cand == null;
}
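Note that only non-reentry candidates are added to distributedKeys: a reentry means the thread already owns the lock, so nothing new has to be acquired remotely. This is the same hold-count idea as JDK reentrant locking, shown here purely as an analogy:

import java.util.concurrent.locks.ReentrantLock;

public class ReentryDemo {
    public static void main(String[] args) {
        ReentrantLock lock = new ReentrantLock();

        lock.lock(); // First acquisition does the real work.
        lock.lock(); // Reentry only bumps the hold count.

        System.out.println(lock.getHoldCount()); // Prints 2.

        lock.unlock();
        lock.unlock();
    }
}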
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridLocalCacheEntry, method addLocal.
/**
* Add local candidate.
*
* @param threadId Owning thread ID.
* @param ver Lock version.
* @param serOrder Version for serializable transactions ordering.
* @param serReadVer Optional read entry version for optimistic serializable transaction.
* @param timeout Timeout to acquire lock.
* @param reenter Reentry flag.
* @param tx Transaction flag.
* @param implicitSingle Implicit transaction flag.
* @param read Read lock flag.
* @return New candidate.
* @throws GridCacheEntryRemovedException If entry has been removed.
*/
@Nullable
GridCacheMvccCandidate addLocal(long threadId, GridCacheVersion ver, @Nullable GridCacheVersion serOrder, @Nullable GridCacheVersion serReadVer, long timeout, boolean reenter, boolean tx, boolean implicitSingle, boolean read) throws GridCacheEntryRemovedException {
assert serReadVer == null || serOrder != null;
CacheObject val;
GridCacheMvccCandidate cand;
CacheLockCandidates prev;
CacheLockCandidates owner = null;
lockEntry();
try {
checkObsolete();
if (serReadVer != null) {
if (!checkSerializableReadVersion(serReadVer))
return null;
}
GridCacheMvcc mvcc = mvccExtras();
if (mvcc == null) {
mvcc = new GridCacheMvcc(cctx);
mvccExtras(mvcc);
}
prev = mvcc.localOwners();
cand = mvcc.addLocal(
    this,
    /*nearNodeId*/null,
    /*nearVer*/null,
    threadId,
    ver,
    timeout,
    serOrder,
    reenter,
    tx,
    implicitSingle,
    /*dht-local*/false,
    read);
if (mvcc.isEmpty())
mvccExtras(null);
else
owner = mvcc.localOwners();
val = this.val;
} finally {
unlockEntry();
}
if (cand != null && !cand.reentry())
cctx.mvcc().addNext(cctx, cand);
checkOwnerChanged(prev, owner, val);
return cand;
}
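Note the shape of addLocal: all MVCC state changes happen between lockEntry() and unlockEntry(), while the follow-ups (cctx.mvcc().addNext and checkOwnerChanged) run only after the entry lock is released, so owner-change callbacks never execute while holding per-entry synchronization. A minimal sketch of that discipline with JDK types (names are illustrative, not Ignite's):

import java.util.Objects;
import java.util.concurrent.locks.ReentrantLock;

class EntryOwnerSketch {
    private final ReentrantLock entryLock = new ReentrantLock();

    private String owner;

    void setOwner(String cand, Runnable onOwnerChanged) {
        String prev;

        entryLock.lock();
        try {
            prev = owner;

            owner = cand; // Mutate per-entry state only under the entry lock.
        }
        finally {
            entryLock.unlock(); // Mirrors unlockEntry() in the finally block above.
        }

        // Fire the notification outside the lock: callbacks may take other
        // locks, so running them under entryLock risks deadlock.
        if (!Objects.equals(prev, cand))
            onOwnerChanged.run();
    }
}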