Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockMapping in project ignite by apache.
From the class GridDhtColocatedLockFuture, method map0:
/**
* @param keys Keys to map.
* @param remap Remap flag.
* @param topLocked Topology locked flag.
* @throws IgniteCheckedException If mapping failed.
*/
private synchronized void map0(Collection<KeyCacheObject> keys, boolean remap, boolean topLocked) throws IgniteCheckedException {
    AffinityTopologyVersion topVer = this.topVer;

    assert topVer != null;
    assert topVer.topologyVersion() > 0;

    if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
            "(all partition nodes left the grid): " + cctx.name()));

        return;
    }

    boolean clientNode = cctx.kernalContext().clientNode();

    assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));

    // First assume this node is primary for all keys passed in.
    if (!clientNode && mapAsPrimary(keys, topVer))
        return;

    mappings = new ArrayDeque<>();

    // Assign keys to primary nodes.
    GridNearLockMapping map = null;

    for (KeyCacheObject key : keys) {
        GridNearLockMapping updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) {
            mappings.add(updated);

            if (tx != null && updated.node().isLocal())
                tx.colocatedLocallyMapped(true);
        }

        map = updated;
    }

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Abandoning (re)map because future is done: " + this);

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

    boolean hasRmtNodes = false;

    boolean first = true;

    // Create mini futures.
    for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping mapping = iter.next();

        ClusterNode node = mapping.node();

        Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();

        boolean loc = node.equals(cctx.localNode());

        assert !mappedKeys.isEmpty();

        GridNearLockRequest req = null;

        Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());

        for (KeyCacheObject key : mappedKeys) {
            IgniteTxKey txKey = cctx.txKey(key);

            GridDistributedCacheEntry entry = null;

            if (tx != null) {
                IgniteTxEntry txEntry = tx.entry(txKey);

                if (txEntry != null) {
                    entry = (GridDistributedCacheEntry) txEntry.cached();

                    if (entry != null && loc == entry.detached()) {
                        entry = cctx.colocated().entryExx(key, topVer, true);

                        txEntry.cached(entry);
                    }
                }
            }

            boolean explicit;

            while (true) {
                try {
                    if (entry == null)
                        entry = cctx.colocated().entryExx(key, topVer, true);

                    if (!cctx.isAll(entry, filter)) {
                        if (log.isDebugEnabled())
                            log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                        onComplete(false, false);

                        return;
                    }

                    assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';

                    GridCacheMvccCandidate cand = addEntry(entry);

                    // Will either return value from dht cache or null if this is a miss.
                    IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null :
                        ((GridDhtCacheEntry) entry).versionedValue(topVer);

                    GridCacheVersion dhtVer = null;

                    if (val != null) {
                        dhtVer = val.get1();

                        valMap.put(key, val);
                    }

                    if (cand != null && !cand.reentry()) {
                        if (req == null) {
                            boolean clientFirst = false;

                            if (first) {
                                clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());

                                first = false;
                            }

                            assert !implicitTx() && !implicitSingleTx() : tx;

                            req = new GridNearLockRequest(
                                cctx.cacheId(),
                                topVer,
                                cctx.nodeId(),
                                threadId,
                                futId,
                                lockVer,
                                inTx(),
                                read,
                                retval,
                                isolation(),
                                isInvalidate(),
                                timeout,
                                mappedKeys.size(),
                                inTx() ? tx.size() : mappedKeys.size(),
                                inTx() && tx.syncMode() == FULL_SYNC,
                                inTx() ? tx.subjectId() : null,
                                inTx() ? tx.taskNameHash() : 0,
                                read ? createTtl : -1L,
                                read ? accessTtl : -1L,
                                skipStore,
                                keepBinary,
                                clientFirst,
                                cctx.deploymentEnabled());

                            mapping.request(req);
                        }

                        distributedKeys.add(key);

                        if (tx != null)
                            tx.addKeyMapping(txKey, mapping.node());

                        req.addKeyBytes(
                            key,
                            retval,
                            dhtVer, // Include DHT version to match remote DHT entry.
                            cctx);
                    }

                    explicit = inTx() && cand == null;

                    if (explicit)
                        tx.addKeyMapping(txKey, mapping.node());

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);

                    entry = null;
                }
            }

            // Mark mapping explicit lock flag.
            if (explicit) {
                boolean marked = tx != null && tx.markExplicit(node.id());

                assert tx == null || marked;
            }
        }

        if (!distributedKeys.isEmpty()) {
            mapping.distributedKeys(distributedKeys);

            hasRmtNodes |= !mapping.node().isLocal();
        }
        else {
            assert mapping.request() == null;

            iter.remove();
        }
    }

    if (hasRmtNodes) {
        trackable = true;

        if (!remap && !cctx.mvcc().addFuture(this))
            throw new IllegalStateException("Duplicate future ID: " + this);
    }
    else
        trackable = false;

    proceedMapping();
}
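The heart of map0 is the key-assignment loop: keys are walked in order and grouped into GridNearLockMapping instances by map(key, map, topVer), and every newly created mapping is queued so that one GridNearLockRequest can later be built per primary node. Below is a minimal sketch of that grouping pattern in plain JDK types; since map() itself is not shown here, it assumes a new mapping is started whenever a key's primary node differs from the current mapping's node. NodeMapping, LockMappingSketch, mapKeys and primaryNodeOf are hypothetical stand-ins for GridNearLockMapping and Ignite's affinity lookup, not Ignite API.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;
import java.util.UUID;

/** Hypothetical stand-in for GridNearLockMapping: the keys destined for one primary node. */
final class NodeMapping {
    final UUID nodeId;                       // primary node for every key in this batch
    final List<String> keys = new ArrayList<>();

    NodeMapping(UUID nodeId) {
        this.nodeId = nodeId;
    }
}

final class LockMappingSketch {
    /**
     * Groups keys by primary node while preserving key order, mirroring how map0
     * builds its deque of GridNearLockMapping instances.
     */
    static Deque<NodeMapping> mapKeys(Iterable<String> keys) {
        Deque<NodeMapping> mappings = new ArrayDeque<>();

        NodeMapping cur = null;

        for (String key : keys) {
            UUID primary = primaryNodeOf(key); // hypothetical affinity lookup

            // Start a new mapping whenever the primary node changes.
            if (cur == null || !cur.nodeId.equals(primary)) {
                cur = new NodeMapping(primary);

                mappings.add(cur);
            }

            cur.keys.add(key);
        }

        return mappings;
    }

    /** Hypothetical affinity lookup; Ignite resolves this through the cache affinity function. */
    static UUID primaryNodeOf(String key) {
        return new UUID(0, Math.floorMod(key.hashCode(), 2)); // pretend exactly two primaries exist
    }

    public static void main(String[] args) {
        for (NodeMapping m : mapKeys(Arrays.asList("a", "b", "c", "d")))
            System.out.println(m.nodeId + " -> " + m.keys);
    }
}

Keeping the mappings in a deque preserves key order and lets proceedMapping() hand one node batch at a time to the request-sending step.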
From the class GridDhtColocatedLockFuture, method proceedMapping0:
/**
* Gets next near lock mapping and either acquires dht locks locally or sends near lock request to
* remote primary node.
*
* @throws IgniteCheckedException If mapping can not be completed.
*/
private void proceedMapping0() throws IgniteCheckedException {
    GridNearLockMapping map;

    synchronized (this) {
        map = mappings.poll();
    }

    // If there are no more mappings to process, complete the future.
    if (map == null)
        return;

    final GridNearLockRequest req = map.request();
    final Collection<KeyCacheObject> mappedKeys = map.distributedKeys();
    final ClusterNode node = map.node();

    if (filter != null && filter.length != 0)
        req.filter(filter, cctx);

    if (node.isLocal())
        lockLocally(mappedKeys, req.topologyVersion());
    else {
        final MiniFuture fut = new MiniFuture(node, mappedKeys, ++miniId);

        req.miniId(fut.futureId());

        // Append new future.
        add(fut);

        IgniteInternalFuture<?> txSync = null;

        if (inTx())
            txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

        if (txSync == null || txSync.isDone()) {
            try {
                cctx.io().send(node, req, cctx.ioPolicy());

                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("Collocated lock fut, sent request [txId=" + lockVer + ", inTx=" + inTx() + ", node=" + node.id() + ']');
                }
            }
            catch (ClusterTopologyCheckedException ex) {
                assert fut != null;

                fut.onResult(ex);
            }
        }
        else {
            txSync.listen(new CI1<IgniteInternalFuture<?>>() {
                @Override public void apply(IgniteInternalFuture<?> t) {
                    try {
                        cctx.io().send(node, req, cctx.ioPolicy());

                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("Collocated lock fut, sent request [txId=" + lockVer + ", inTx=" + inTx() + ", node=" + node.id() + ']');
                        }
                    }
                    catch (ClusterTopologyCheckedException ex) {
                        assert fut != null;

                        fut.onResult(ex);
                    }
                    catch (IgniteCheckedException e) {
                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("Collocated lock fut, failed to send request [txId=" + lockVer + ", inTx=" + inTx() + ", node=" + node.id() + ", err=" + e + ']');
                        }

                        onError(e);
                    }
                }
            });
        }
    }
}
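When the primary node is remote, proceedMapping0 defers the network send until any pending transaction finish acknowledgement (txSync) has completed: it sends immediately if that future is already done, and otherwise sends from a completion listener. A minimal sketch of the same decision with JDK futures follows; sendWhenReady, prerequisite and sendRequest are hypothetical stand-ins for the txSync / cctx.io().send(...) pair rather than Ignite API.

import java.util.concurrent.CompletableFuture;

final class DeferredSendSketch {
    /**
     * Runs sendRequest either right away (nothing to wait for, or the prerequisite has
     * already completed) or from a completion listener, mirroring the txSync handling
     * in proceedMapping0.
     */
    static void sendWhenReady(CompletableFuture<Void> prerequisite, Runnable sendRequest) {
        if (prerequisite == null || prerequisite.isDone()) {
            // Fast path: send on the calling thread.
            sendRequest.run();
        }
        else {
            // Deferred path: the lock request must not overtake the pending acknowledgement.
            prerequisite.whenComplete((ignored, err) -> sendRequest.run());
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Void> ack = new CompletableFuture<>();

        sendWhenReady(ack, () -> System.out.println("request sent"));

        ack.complete(null); // completing the acknowledgement triggers the deferred send
    }
}

In the real method both paths also route ClusterTopologyCheckedException (and, on the deferred path, IgniteCheckedException) failures to the mini future via fut.onResult(ex) or onError(e); the sketch omits that error handling for brevity.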