Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
In class GridDhtColocatedCache, method removeLocks().
/**
* Removes locks regardless of whether they are owned or not for given
* version and keys.
*
* @param threadId Thread ID.
* @param ver Lock version.
* @param keys Keys.
*/
public void removeLocks(long threadId, GridCacheVersion ver, Collection<KeyCacheObject> keys) {
    if (keys.isEmpty())
        return;
    try {
        int keyCnt = -1;
        Map<ClusterNode, GridNearUnlockRequest> map = null;
        Collection<KeyCacheObject> locKeys = new LinkedList<>();
        for (KeyCacheObject key : keys) {
            IgniteTxKey txKey = ctx.txKey(key);
            GridCacheMvccCandidate lock = ctx.mvcc().removeExplicitLock(threadId, txKey, ver);
            if (lock != null) {
                AffinityTopologyVersion topVer = lock.topologyVersion();
                if (map == null) {
                    Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, topVer);
                    keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());
                    map = U.newHashMap(affNodes.size());
                }
                ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);
                if (primary == null) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to remove locks (all partition nodes left the grid).");
                    continue;
                }
                if (!primary.isLocal()) {
                    // Send request to remove from remote nodes.
                    GridNearUnlockRequest req = map.get(primary);
                    if (req == null) {
                        map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));
                        req.version(ver);
                    }
                    GridCacheEntryEx entry = peekEx(key);
                    KeyCacheObject key0 = entry != null ? entry.key() : key;
                    req.addKey(key0, ctx);
                } else
                    locKeys.add(key);
            }
        }
        if (!locKeys.isEmpty())
            removeLocks(ctx.localNodeId(), ver, locKeys, true);
        if (map == null || map.isEmpty())
            return;
        IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);
        Collection<GridCacheVersion> committed = versPair.get1();
        Collection<GridCacheVersion> rolledback = versPair.get2();
        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();
            GridDistributedUnlockRequest req = mapping.getValue();
            if (!F.isEmpty(req.keys())) {
                req.completedVersions(committed, rolledback);
                try {
                    // We don't wait for reply to this message.
                    ctx.io().send(n, req, ctx.ioPolicy());
                } catch (ClusterTopologyCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to send unlock request (node has left the grid) [keys=" + req.keys() + ", n=" + n + ", e=" + e + ']');
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to send unlock request [keys=" + req.keys() + ", n=" + n + ']', e);
                }
            }
        }
    } catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
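This internal path is driven from the public API when a user releases an explicit key lock. Below is a minimal sketch of that public-API usage, not part of the snippet above; the cache name "demo" and the key 42 are assumptions, and explicit locks require a TRANSACTIONAL cache.

// Minimal sketch: acquiring and releasing an explicit key lock through the public API.
// The cache name "demo" and key 42 are assumptions; explicit locks need a TRANSACTIONAL cache.
// Releasing the lock is what ultimately drives internal explicit-lock cleanup such as removeLocks(...).
import java.util.concurrent.locks.Lock;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class ExplicitLockSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("demo");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Lock lock = cache.lock(42);

            lock.lock();
            try {
                cache.put(42, "updated under explicit lock");
            }
            finally {
                lock.unlock(); // Releases the explicit lock for this key.
            }
        }
    }
}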
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
In class GridDhtColocatedLockFuture, method requestedKeys0().
/**
 * @return Keys for which locks were requested from remote nodes but no response has been received yet.
 */
private Set<IgniteTxKey> requestedKeys0() {
    for (IgniteInternalFuture<Boolean> miniFut : futures()) {
        if (isMini(miniFut) && !miniFut.isDone()) {
            MiniFuture mini = (MiniFuture) miniFut;
            Set<IgniteTxKey> requestedKeys = U.newHashSet(mini.keys.size());
            for (KeyCacheObject key : mini.keys)
                requestedKeys.add(new IgniteTxKey(key, cctx.cacheId()));
            return requestedKeys;
        }
    }
    return null;
}
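The method simply pairs every still-pending key with the cache ID, which is what gives IgniteTxKey its identity. A simplified, self-contained sketch of that pairing is shown below; TxKey, PendingKeys and collectPending are illustrative stand-ins, not Ignite types.

// Stand-alone sketch of the (key, cacheId) pairing performed above.
// TxKey and collectPending are illustrative names, not Ignite internals.
import java.util.Collection;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

final class TxKey {
    private final Object key;
    private final int cacheId;

    TxKey(Object key, int cacheId) {
        this.key = key;
        this.cacheId = cacheId;
    }

    @Override public boolean equals(Object o) {
        if (this == o)
            return true;
        if (!(o instanceof TxKey))
            return false;
        TxKey other = (TxKey) o;
        return cacheId == other.cacheId && Objects.equals(key, other.key);
    }

    @Override public int hashCode() {
        return Objects.hash(key, cacheId);
    }
}

final class PendingKeys {
    /** Pairs every still-pending key with its cache ID, mirroring requestedKeys0(). */
    static Set<TxKey> collectPending(Collection<?> pendingKeys, int cacheId) {
        Set<TxKey> res = new HashSet<>(pendingKeys.size());
        for (Object key : pendingKeys)
            res.add(new TxKey(key, cacheId));
        return res;
    }
}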
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
In class GridDhtColocatedLockFuture, method addEntry().
/**
 * Adds entry to future.
 *
 * @param entry Entry to add.
 * @return Non-reentry candidate if the lock should be acquired on a remote node,
 *      reentry candidate if the lock has already been acquired, and {@code null} if an explicit
 *      lock is held and an implicit transaction accesses the locked entry.
 * @throws IgniteCheckedException If failed to add entry due to external locking.
 */
@Nullable
private GridCacheMvccCandidate addEntry(GridDistributedCacheEntry entry) throws IgniteCheckedException {
    IgniteTxKey txKey = entry.txKey();
    GridCacheMvccCandidate cand = cctx.mvcc().explicitLock(threadId, txKey);
    if (inTx()) {
        if (cand != null) {
            if (!tx.implicit())
                throw new IgniteCheckedException("Cannot access key within transaction if lock is " + "externally held [key=" + entry.key() + ", entry=" + entry + ']');
            else
                return null;
        } else {
            IgniteTxEntry txEntry = tx.entry(txKey);
            assert txEntry != null;
            txEntry.cached(entry);
            // Check transaction entries (corresponding tx entries must be enlisted in transaction).
            cand = new GridCacheMvccCandidate(entry, cctx.localNodeId(), null, null, threadId, lockVer, true, txEntry.locked(), inTx(), inTx() && tx.implicitSingle(), false, false, null, false);
            cand.topologyVersion(topVer);
        }
    } else {
        if (cand == null) {
            cand = new GridCacheMvccCandidate(entry, cctx.localNodeId(), null, null, threadId, lockVer, true, false, inTx(), inTx() && tx.implicitSingle(), false, false, null, false);
            cand.topologyVersion(topVer);
        } else
            cand = cand.reenter();
        cctx.mvcc().addExplicitLock(threadId, cand, topVer);
    }
    return cand;
}
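The "externally held" check above is what surfaces at the public-API level as the rule that explicit locks and transactions must not be mixed on the same key in the same thread. Below is a minimal sketch, assuming a TRANSACTIONAL cache named "demo", of a pessimistic transaction whose key locks are acquired implicitly on first access, which is the kind of access that enlists entries and creates lock candidates as in the method above.

// Minimal sketch: pessimistic transaction acquiring key locks implicitly on first access.
// Cache name "demo" and keys are assumptions; the cache must be TRANSACTIONAL.
// Holding cache.lock(key) in the same thread and then touching that key inside a
// transaction is rejected (the "lock is externally held" case handled above).
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class PessimisticTxSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("demo");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                // First access locks the key on its primary node.
                String prev = cache.get(1);

                cache.put(1, prev == null ? "first" : prev + "-updated");

                tx.commit();
            }
        }
    }
}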
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
In class GridNearLockFuture, method map().
/**
 * Maps keys to nodes. Note that we cannot simply group keys by node and send lock requests, as
 * such an approach does not preserve the order of lock acquisition. Instead, keys are split into
 * contiguous groups belonging to one primary node, and locks for these groups are acquired sequentially.
 *
 * @param keys Keys.
 * @param remap Remap flag.
 * @param topLocked {@code True} if thread already acquired lock preventing topology change.
 */
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
    try {
        AffinityTopologyVersion topVer = this.topVer;
        assert topVer != null;
        assert topVer.topologyVersion() > 0 : topVer;
        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all " + "partition nodes left the grid)."));
            return;
        }
        boolean clientNode = cctx.kernalContext().clientNode();
        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
        synchronized (this) {
            mappings = new ArrayDeque<>();
            // Assign keys to primary nodes.
            GridNearLockMapping map = null;
            for (KeyCacheObject key : keys) {
                GridNearLockMapping updated = map(key, map, topVer);
                // If new mapping was created, add to collection.
                if (updated != map) {
                    mappings.add(updated);
                    if (tx != null && updated.node().isLocal())
                        tx.nearLocallyMapped(true);
                }
                map = updated;
            }
            if (isDone()) {
                if (log.isDebugEnabled())
                    log.debug("Abandoning (re)map because future is done: " + this);
                return;
            }
            if (log.isDebugEnabled())
                log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
            boolean first = true;
            // Create mini futures.
            for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
                GridNearLockMapping mapping = iter.next();
                ClusterNode node = mapping.node();
                Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
                assert !mappedKeys.isEmpty();
                GridNearLockRequest req = null;
                Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
                boolean explicit = false;
                for (KeyCacheObject key : mappedKeys) {
                    IgniteTxKey txKey = cctx.txKey(key);
                    while (true) {
                        GridNearCacheEntry entry = null;
                        try {
                            entry = cctx.near().entryExx(key, topVer);
                            if (!cctx.isAll(entry, filter)) {
                                if (log.isDebugEnabled())
                                    log.debug("Entry being locked did not pass filter (will not lock): " + entry);
                                onComplete(false, false, true);
                                return;
                            }
                            // Removed exception may be thrown here.
                            GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());
                            if (isDone()) {
                                if (log.isDebugEnabled())
                                    log.debug("Abandoning (re)map because future is done after addEntry attempt " + "[fut=" + this + ", entry=" + entry + ']');
                                return;
                            }
                            if (cand != null) {
                                if (tx == null && !cand.reentry())
                                    cctx.mvcc().addExplicitLock(threadId, cand, topVer);
                                IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();
                                if (val == null) {
                                    GridDhtCacheEntry dhtEntry = dht().peekExx(key);
                                    try {
                                        if (dhtEntry != null)
                                            val = dhtEntry.versionedValue(topVer);
                                    } catch (GridCacheEntryRemovedException ignored) {
                                        assert dhtEntry.obsolete() : dhtEntry;
                                        if (log.isDebugEnabled())
                                            log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                                    }
                                }
                                GridCacheVersion dhtVer = null;
                                if (val != null) {
                                    dhtVer = val.get1();
                                    valMap.put(key, val);
                                }
                                if (!cand.reentry()) {
                                    if (req == null) {
                                        boolean clientFirst = false;
                                        if (first) {
                                            clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
                                            first = false;
                                        }
                                        assert !implicitTx() && !implicitSingleTx() : tx;
                                        req = new GridNearLockRequest(cctx.cacheId(), topVer, cctx.nodeId(), threadId, futId, lockVer, inTx(), read, retval, isolation(), isInvalidate(), timeout, mappedKeys.size(), inTx() ? tx.size() : mappedKeys.size(), inTx() && tx.syncMode() == FULL_SYNC, inTx() ? tx.subjectId() : null, inTx() ? tx.taskNameHash() : 0, read ? createTtl : -1L, read ? accessTtl : -1L, skipStore, keepBinary, clientFirst, true, cctx.deploymentEnabled());
                                        mapping.request(req);
                                    }
                                    distributedKeys.add(key);
                                    if (tx != null)
                                        tx.addKeyMapping(txKey, mapping.node());
                                    // Include DHT version to match remote DHT entry.
                                    req.addKeyBytes(key, retval && dhtVer == null, dhtVer, cctx);
                                }
                                if (cand.reentry())
                                    explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            } else {
                                if (timedOut)
                                    return;
                                // Ignore reentries within transactions.
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            }
                            if (explicit)
                                tx.addKeyMapping(txKey, mapping.node());
                            break;
                        } catch (GridCacheEntryRemovedException ignored) {
                            assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;
                            if (log.isDebugEnabled())
                                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                        }
                    }
                    // Mark mapping explicit lock flag.
                    if (explicit) {
                        boolean marked = tx != null && tx.markExplicit(node.id());
                        assert tx == null || marked;
                    }
                }
                if (!distributedKeys.isEmpty())
                    mapping.distributedKeys(distributedKeys);
                else {
                    assert mapping.request() == null;
                    iter.remove();
                }
            }
        }
        cctx.mvcc().recheckPendingLocks();
        proceedMapping();
    } catch (IgniteCheckedException ex) {
        onError(ex);
    }
}
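The javadoc's key point is the grouping rule: keys are split into contiguous runs that share a primary node rather than being merged per node, so lock acquisition order is preserved. Below is a self-contained sketch of just that rule; the type parameters, primaryOf function and splitByPrimary name are illustrative stand-ins, not Ignite API.

// Stand-alone sketch of the grouping rule described in the javadoc of map(): keys are
// split into contiguous runs that share a primary node, preserving lock-acquisition order.
// primaryOf and splitByPrimary are illustrative stand-ins, not Ignite API.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

final class ContiguousGrouping {
    static <K, N> List<List<K>> splitByPrimary(Iterable<K> keys, Function<K, N> primaryOf) {
        List<List<K>> groups = new ArrayList<>();
        List<K> cur = null;
        N curNode = null;

        for (K key : keys) {
            N node = primaryOf.apply(key);

            // Start a new group whenever the primary node changes, even if the same node
            // appeared earlier: merging would reorder lock acquisition.
            if (cur == null || !node.equals(curNode)) {
                cur = new ArrayList<>();
                groups.add(cur);
                curNode = node;
            }

            cur.add(key);
        }

        return groups;
    }
}

For a key sequence mapping k1→A, k2→A, k3→B, k4→A this yields the groups [k1, k2], [k3], [k4]: node A is contacted again for k4 instead of having its keys merged into the first group.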
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
In class GridNearLockFuture, method requestedKeys0().
/**
 * @return Keys for which locks were requested from remote nodes but no response has been received yet.
 */
private Set<IgniteTxKey> requestedKeys0() {
    for (IgniteInternalFuture<Boolean> miniFut : futures()) {
        if (isMini(miniFut) && !miniFut.isDone()) {
            MiniFuture mini = (MiniFuture) miniFut;
            Set<IgniteTxKey> requestedKeys = U.newHashSet(mini.keys.size());
            for (KeyCacheObject key : mini.keys)
                requestedKeys.add(new IgniteTxKey(key, cctx.cacheId()));
            return requestedKeys;
        }
    }
    return null;
}
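The keys returned here are the ones whose near lock requests are still in flight. From the public API, waiting for such a remote lock grant can be bounded with a timed lock attempt; a minimal sketch follows, again assuming a TRANSACTIONAL cache named "demo" with an illustrative key.

// Minimal sketch: bounding the wait for a lock grant with a timed lock attempt.
// Cache name "demo" and the key are assumptions; explicit locks need a TRANSACTIONAL cache.
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class TimedLockSketch {
    public static void main(String[] args) throws InterruptedException {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("demo");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Lock lock = cache.lock(1);

            // Give up if the lock (possibly owned elsewhere) is not granted in time.
            if (lock.tryLock(2, TimeUnit.SECONDS)) {
                try {
                    cache.put(1, "locked");
                }
                finally {
                    lock.unlock();
                }
            }
            else
                System.out.println("Lock not acquired within timeout.");
        }
    }
}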