Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry in project ignite by apache.
The class GridNearTransactionalCache, method unlockAll.
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;
    try {
        GridCacheVersion ver = null;
        int keyCnt = -1;
        Map<ClusterNode, GridNearUnlockRequest> map = null;
        Collection<KeyCacheObject> locKeys = new LinkedList<>();
        for (K key : keys) {
            while (true) {
                KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
                GridDistributedCacheEntry entry = peekExx(cacheKey);
                if (entry == null)
                    break; // While.
                try {
                    GridCacheMvccCandidate cand = entry.candidate(ctx.nodeId(), Thread.currentThread().getId());
                    AffinityTopologyVersion topVer = AffinityTopologyVersion.NONE;
                    if (cand != null) {
                        assert cand.nearLocal() : "Got non-near-local candidate in near cache: " + cand;
                        ver = cand.version();
                        if (map == null) {
                            Collection<ClusterNode> affNodes = CU.allNodes(ctx, cand.topologyVersion());
                            if (F.isEmpty(affNodes))
                                return;
                            keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());
                            map = U.newHashMap(affNodes.size());
                        }
                        topVer = cand.topologyVersion();
                        // Send request to remove from remote nodes.
                        ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);
                        if (primary == null) {
                            if (log.isDebugEnabled())
                                log.debug("Failed to unlock key (all partition nodes left the grid).");
                            break;
                        }
                        GridNearUnlockRequest req = map.get(primary);
                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));
                            req.version(ver);
                        }
                        // Remove candidate from local node first.
                        GridCacheMvccCandidate rmv = entry.removeLock();
                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new IgniteCheckedException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);
                                if (!primary.isLocal()) {
                                    assert req != null;
                                    req.addKey(entry.key(), ctx);
                                }
                                else
                                    locKeys.add(cacheKey);
                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes)" +
                                    " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }
                    }
                    assert !topVer.equals(AffinityTopologyVersion.NONE) || cand == null;
                    if (topVer.equals(AffinityTopologyVersion.NONE))
                        topVer = ctx.affinity().affinityTopologyVersion();
                    ctx.evicts().touch(entry, topVer);
                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }
        if (ver == null)
            return;
        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();
            GridDistributedUnlockRequest req = mapping.getValue();
            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!F.isEmpty(req.keys()))
                // We don't wait for reply to this message.
                ctx.io().send(n, req, ctx.ioPolicy());
        }
    }
    catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
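For orientation, the snippet above is the near-cache side of releasing explicit locks. Below is a minimal sketch of the public API call that ends up on this unlock path, assuming a TRANSACTIONAL cache; the cache name "tx-cache" and the class name ExplicitLockSketch are illustrative, not taken from the snippet.

import java.util.concurrent.locks.Lock;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class ExplicitLockSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Explicit locks require a TRANSACTIONAL cache.
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("tx-cache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Lock lock = cache.lock(1);

            lock.lock();
            try {
                cache.put(1, "value");
            }
            finally {
                // Releasing the lock is what eventually drives the near cache's unlock path.
                lock.unlock();
            }
        }
    }
}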
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry in project ignite by apache.
The class GridNearLockFuture, method proceedMapping0.
/**
 * Gets next near lock mapping and either acquires dht locks locally or sends near lock request to
 * remote primary node.
 *
 * @throws IgniteCheckedException If mapping can not be completed.
 */
@SuppressWarnings("unchecked")
private void proceedMapping0() throws IgniteCheckedException {
    GridNearLockMapping map;
    synchronized (this) {
        map = mappings.poll();
    }
    // If there are no more mappings to process, complete the future.
    if (map == null)
        return;
    final GridNearLockRequest req = map.request();
    final Collection<KeyCacheObject> mappedKeys = map.distributedKeys();
    final ClusterNode node = map.node();
    if (filter != null && filter.length != 0)
        req.filter(filter, cctx);
    if (node.isLocal()) {
        req.miniId(-1);
        if (log.isDebugEnabled())
            log.debug("Before locally locking near request: " + req);
        IgniteInternalFuture<GridNearLockResponse> fut = dht().lockAllAsync(cctx, cctx.localNode(), req, filter);
        // Add new future.
        add(new GridEmbeddedFuture<>(new C2<GridNearLockResponse, Exception, Boolean>() {
            @Override public Boolean apply(GridNearLockResponse res, Exception e) {
                if (CU.isLockTimeoutOrCancelled(e) || (res != null && CU.isLockTimeoutOrCancelled(res.error())))
                    return false;
                if (e != null) {
                    onError(e);
                    return false;
                }
                if (res == null) {
                    onError(new IgniteCheckedException("Lock response is null for future: " + this));
                    return false;
                }
                if (res.error() != null) {
                    onError(res.error());
                    return false;
                }
                if (log.isDebugEnabled())
                    log.debug("Acquired lock for local DHT mapping [locId=" + cctx.nodeId() +
                        ", mappedKeys=" + mappedKeys + ", fut=" + GridNearLockFuture.this + ']');
                try {
                    int i = 0;
                    for (KeyCacheObject k : mappedKeys) {
                        while (true) {
                            GridNearCacheEntry entry = cctx.near().entryExx(k, req.topologyVersion());
                            try {
                                IgniteBiTuple<GridCacheVersion, CacheObject> oldValTup = valMap.get(entry.key());
                                boolean hasBytes = entry.hasValue();
                                CacheObject oldVal = entry.rawGet();
                                CacheObject newVal = res.value(i);
                                GridCacheVersion dhtVer = res.dhtVersion(i);
                                GridCacheVersion mappedVer = res.mappedVersion(i);
                                // On local node don't record twice if DHT cache already recorded.
                                boolean record = retval && oldValTup != null && oldValTup.get1().equals(dhtVer);
                                if (newVal == null) {
                                    if (oldValTup != null) {
                                        if (oldValTup.get1().equals(dhtVer))
                                            newVal = oldValTup.get2();
                                        oldVal = oldValTup.get2();
                                    }
                                }
                                // Lock is held at this point, so we can set the
                                // returned value if any.
                                entry.resetFromPrimary(newVal, lockVer, dhtVer, node.id(), topVer);
                                entry.readyNearLock(lockVer, mappedVer, res.committedVersions(), res.rolledbackVersions(), res.pending());
                                if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                                    boolean pass = res.filterResult(i);
                                    tx.entry(cctx.txKey(k)).filters(pass ? CU.empty0() : CU.alwaysFalse0Arr());
                                }
                                if (record) {
                                    if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                                        cctx.events().addEvent(entry.partition(), entry.key(), tx, null, EVT_CACHE_OBJECT_READ,
                                            newVal, newVal != null, oldVal, hasBytes, CU.subjectId(tx, cctx.shared()), null,
                                            inTx() ? tx.resolveTaskName() : null, keepBinary);
                                    if (cctx.cache().configuration().isStatisticsEnabled())
                                        cctx.cache().metrics0().onRead(oldVal != null);
                                }
                                if (log.isDebugEnabled())
                                    log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');
                                break; // Inner while loop.
                            }
                            catch (GridCacheEntryRemovedException ignored) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to add candidates because entry was removed (will renew).");
                                synchronized (GridNearLockFuture.this) {
                                    // Replace old entry with new one.
                                    entries.set(i, (GridDistributedCacheEntry) cctx.cache().entryEx(entry.key()));
                                }
                            }
                        }
                        // Increment outside of while loop.
                        i++;
                    }
                    // Proceed and add new future (if any) before completing embedded future.
                    proceedMapping();
                }
                catch (IgniteCheckedException ex) {
                    onError(ex);
                    return false;
                }
                return true;
            }
        }, fut));
    }
    else {
        final MiniFuture fut = new MiniFuture(node, mappedKeys, ++miniId);
        req.miniId(fut.futureId());
        // Append new future.
        add(fut);
        IgniteInternalFuture<?> txSync = null;
        if (inTx())
            txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());
        if (txSync == null || txSync.isDone()) {
            try {
                if (log.isDebugEnabled())
                    log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');
                cctx.io().send(node, req, cctx.ioPolicy());
            }
            catch (ClusterTopologyCheckedException ex) {
                fut.onResult(ex);
            }
        }
        else {
            txSync.listen(new CI1<IgniteInternalFuture<?>>() {
                @Override public void apply(IgniteInternalFuture<?> t) {
                    try {
                        if (log.isDebugEnabled())
                            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');
                        cctx.io().send(node, req, cctx.ioPolicy());
                    }
                    catch (ClusterTopologyCheckedException ex) {
                        fut.onResult(ex);
                    }
                    catch (IgniteCheckedException e) {
                        onError(e);
                    }
                }
            });
        }
    }
}
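A hedged illustration of what drives this mapping logic from user code: in a pessimistic transaction, reads and writes acquire key locks, and for keys whose primary node is remote a near lock request like the one above is dispatched. The cache and class names below are illustrative, and the TRANSACTIONAL cache configuration is an assumption, not part of the snippet.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class PessimisticLockSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("tx-cache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                // In PESSIMISTIC / REPEATABLE_READ mode this read locks the key: the lock
                // is acquired locally or mapped to the remote primary node.
                String val = cache.get(1);

                cache.put(1, val == null ? "initial" : val + "-updated");

                tx.commit();
            }
        }
    }
}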
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry in project ignite by apache.
The class GridNearTransactionalCache, method removeLocks.
/**
 * Removes locks regardless of whether they are owned or not for given
 * version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<KeyCacheObject> keys) {
    if (keys.isEmpty())
        return;
    try {
        int keyCnt = -1;
        Map<ClusterNode, GridNearUnlockRequest> map = null;
        for (KeyCacheObject key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest req = null;
            while (true) {
                GridDistributedCacheEntry entry = peekExx(key);
                try {
                    if (entry != null) {
                        GridCacheMvccCandidate cand = entry.candidate(ver);
                        if (cand != null) {
                            if (map == null) {
                                Collection<ClusterNode> affNodes = CU.allNodes(ctx, cand.topologyVersion());
                                if (F.isEmpty(affNodes))
                                    return;
                                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());
                                map = U.newHashMap(affNodes.size());
                            }
                            ClusterNode primary = ctx.affinity().primaryByKey(key, cand.topologyVersion());
                            if (primary == null) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to unlock key (all partition nodes left the grid).");
                                break;
                            }
                            if (!primary.isLocal()) {
                                req = map.get(primary);
                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));
                                    req.version(ver);
                                }
                            }
                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);
                                    assert req == null;
                                    continue;
                                }
                                req.addKey(entry.key(), ctx);
                            }
                        }
                    }
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" + ver +
                            ", entry=" + entry + ']');
                }
            }
        }
        if (map == null || map.isEmpty())
            return;
        IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);
        Collection<GridCacheVersion> committed = versPair.get1();
        Collection<GridCacheVersion> rolledback = versPair.get2();
        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();
            GridDistributedUnlockRequest req = mapping.getValue();
            if (!F.isEmpty(req.keys())) {
                req.completedVersions(committed, rolledback);
                // We don't wait for reply to this message.
                ctx.io().send(n, req, ctx.ioPolicy());
            }
        }
    }
    catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
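Note that both unlockAll and removeLocks pre-size each per-node unlock request with the same heuristic: the total key count divided evenly across the affinity nodes, rounded up. A standalone restatement of that sizing, with illustrative numbers rather than values from the snippet:

// Illustrative restatement of the request-sizing heuristic used above.
int totalKeys = 10; // keys.size()
int nodeCnt = 3;    // CU.allNodes(ctx, topVer).size()

int keyCnt = (int) Math.ceil((double) totalKeys / nodeCnt); // 4 keys expected per request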
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry in project ignite by apache.
The class GridDhtColocatedLockFuture, method addLocalKey.
/**
 * Adds local key future.
 *
 * @param key Key to add.
 * @param topVer Topology version.
 * @param distributedKeys Collection of keys that need to be locked.
 * @return {@code True} if transaction accesses key that was explicitly locked before.
 * @throws IgniteCheckedException If lock is externally held and transaction is explicit.
 */
private boolean addLocalKey(KeyCacheObject key, AffinityTopologyVersion topVer, Collection<KeyCacheObject> distributedKeys) throws IgniteCheckedException {
    GridDistributedCacheEntry entry = cctx.colocated().entryExx(key, topVer, false);
    assert !entry.detached();
    if (!cctx.isAll(entry, filter)) {
        if (log.isDebugEnabled())
            log.debug("Entry being locked did not pass filter (will not lock): " + entry);
        onComplete(false, false);
        return false;
    }
    GridCacheMvccCandidate cand = addEntry(entry);
    if (cand != null && !cand.reentry())
        distributedKeys.add(key);
    return inTx() && cand == null;
}
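A hedged sketch of the user-facing call that reaches this colocated code path when the near cache is disabled: locking several keys at once on a TRANSACTIONAL cache. The class name, key values, and the assumption that the cache is already configured as TRANSACTIONAL are illustrative.

import java.util.Arrays;
import java.util.concurrent.locks.Lock;

import org.apache.ignite.IgniteCache;

public class LockAllSketch {
    public static void run(IgniteCache<Integer, String> cache) {
        // Keys whose first lock candidate is not a reentry end up in distributedKeys
        // and are then mapped to their primary nodes.
        Lock multiLock = cache.lockAll(Arrays.asList(1, 2, 3));

        multiLock.lock();
        try {
            cache.put(1, "a");
            cache.put(2, "b");
            cache.put(3, "c");
        }
        finally {
            multiLock.unlock();
        }
    }
}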
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry in project ignite by apache.
The class GridDhtTxPrepareFuture, method readyLocks.
/**
 * @param checkEntries Entries.
 */
private void readyLocks(Iterable<IgniteTxEntry> checkEntries) {
    for (IgniteTxEntry txEntry : checkEntries) {
        GridCacheContext cacheCtx = txEntry.context();
        if (cacheCtx.isLocal())
            continue;
        GridDistributedCacheEntry entry = (GridDistributedCacheEntry) txEntry.cached();
        if (entry == null) {
            entry = (GridDistributedCacheEntry) cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion());
            txEntry.cached(entry);
        }
        if (tx.optimistic() && txEntry.explicitVersion() == null) {
            synchronized (this) {
                lockKeys.add(txEntry.txKey());
            }
        }
        while (true) {
            try {
                assert txEntry.explicitVersion() == null || entry.lockedBy(txEntry.explicitVersion());
                CacheLockCandidates owners = entry.readyLock(tx.xidVersion());
                if (log.isDebugEnabled())
                    log.debug("Current lock owners for entry [owner=" + owners + ", entry=" + entry + ']');
                break; // While.
            }
            // Possible if entry cached within transaction is obsolete.
            catch (GridCacheEntryRemovedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry in future onAllReplies method (will retry): " + txEntry);
                entry = (GridDistributedCacheEntry) cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion());
                txEntry.cached(entry);
            }
        }
    }
}
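For context, readyLocks runs on the primary (DHT) side while a transaction is being prepared. Below is a hedged, user-level sketch of an optimistic transaction whose prepare phase readies locks in this way; the class name and key values are illustrative, and a TRANSACTIONAL cache is assumed.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class OptimisticPrepareSketch {
    public static void run(Ignite ignite, IgniteCache<Integer, String> cache) {
        try (Transaction tx = ignite.transactions().txStart(
            TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE)) {
            cache.put(1, "a");
            cache.put(2, "b");

            // In OPTIMISTIC mode locks are acquired only during the prepare phase of commit;
            // on each primary node the prepare future readies lock candidates for the tx entries.
            tx.commit();
        }
    }
}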