Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
Class GridNearTxLocal, method getAllAsync:
/**
* @param cacheCtx Cache context.
* @param entryTopVer Entry topology version.
* @param keys Keys to get.
* @param deserializeBinary Deserialize binary flag.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects flag.
* @param skipStore Skip store flag.
* @param recovery Recovery flag.
* @param needVer Need entry version flag.
* @return Future for this get.
*/
@SuppressWarnings("unchecked")
public <K, V> IgniteInternalFuture<Map<K, V>> getAllAsync(final GridCacheContext cacheCtx, @Nullable final AffinityTopologyVersion entryTopVer, Collection<KeyCacheObject> keys, final boolean deserializeBinary, final boolean skipVals, final boolean keepCacheObjects, final boolean skipStore, final boolean recovery, final boolean needVer) {
if (F.isEmpty(keys))
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
init();
int keysCnt = keys.size();
boolean single = keysCnt == 1;
try {
checkValid();
final Map<K, V> retMap = new GridLeanMap<>(keysCnt);
final Map<KeyCacheObject, GridCacheVersion> missed = new GridLeanMap<>(pessimistic() ? keysCnt : 0);
CacheOperationContext opCtx = cacheCtx.operationContextPerCall();
ExpiryPolicy expiryPlc = opCtx != null ? opCtx.expiry() : null;
final Collection<KeyCacheObject> lockKeys = enlistRead(cacheCtx, entryTopVer, keys, expiryPlc, retMap, missed, keysCnt, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer);
if (single && missed.isEmpty())
return new GridFinishedFuture<>(retMap);
// Handle locks.
if (pessimistic() && !readCommitted() && !skipVals) {
if (expiryPlc == null)
expiryPlc = cacheCtx.expiry();
long accessTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForAccess()) : CU.TTL_NOT_CHANGED;
long createTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForCreation()) : CU.TTL_NOT_CHANGED;
long timeout = remainingTime();
if (timeout == -1)
return new GridFinishedFuture<>(timeoutException());
IgniteInternalFuture<Boolean> fut = cacheCtx.cache().txLockAsync(lockKeys, timeout, this, true, true, isolation, isInvalidate(), createTtl, accessTtl);
final ExpiryPolicy expiryPlc0 = expiryPlc;
PLC2<Map<K, V>> plc2 = new PLC2<Map<K, V>>() {
@Override
public IgniteInternalFuture<Map<K, V>> postLock() throws IgniteCheckedException {
if (log.isDebugEnabled())
log.debug("Acquired transaction lock for read on keys: " + lockKeys);
// Load keys only after the locks have been acquired.
for (KeyCacheObject cacheKey : lockKeys) {
K keyVal = (K) (keepCacheObjects ? cacheKey : cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, true));
if (retMap.containsKey(keyVal))
// We already have a return value.
continue;
IgniteTxKey txKey = cacheCtx.txKey(cacheKey);
IgniteTxEntry txEntry = entry(txKey);
assert txEntry != null;
// Check if there is cached value.
while (true) {
GridCacheEntryEx cached = txEntry.cached();
CacheObject val = null;
GridCacheVersion readVer = null;
EntryGetResult getRes = null;
try {
Object transformClo = (!F.isEmpty(txEntry.entryProcessors()) && cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ? F.first(txEntry.entryProcessors()) : null;
if (needVer) {
getRes = cached.innerGetVersioned(null, GridNearTxLocal.this, /*update-metrics*/true, /*event*/!skipVals, CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(), null, txEntry.keepBinary(), null);
if (getRes != null) {
val = getRes.value();
readVer = getRes.version();
}
} else {
val = cached.innerGet(null, GridNearTxLocal.this, /*read through*/false, /*metrics*/true, /*events*/!skipVals, CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(), null, txEntry.keepBinary());
}
// If value is in cache and passed the filter.
if (val != null) {
missed.remove(cacheKey);
txEntry.setAndMarkValid(val);
if (!F.isEmpty(txEntry.entryProcessors()))
val = txEntry.applyEntryProcessors(val);
cacheCtx.addResult(retMap, cacheKey, val, skipVals, keepCacheObjects, deserializeBinary, false, getRes, readVer, 0, 0, needVer);
if (readVer != null)
txEntry.entryReadVersion(readVer);
}
// While.
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed exception in get postLock (will retry): " + cached);
txEntry.cached(entryEx(cacheCtx, txKey, topologyVersion()));
}
}
}
if (!missed.isEmpty() && cacheCtx.isLocal()) {
AffinityTopologyVersion topVer = topologyVersionSnapshot();
if (topVer == null)
topVer = entryTopVer;
return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc0);
}
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
}
};
FinishClosure<Map<K, V>> finClos = new FinishClosure<Map<K, V>>() {
@Override
Map<K, V> finish(Map<K, V> loaded) {
retMap.putAll(loaded);
return retMap;
}
};
if (fut.isDone()) {
try {
IgniteInternalFuture<Map<K, V>> fut1 = plc2.apply(fut.get(), null);
return fut1.isDone() ? new GridFinishedFuture<>(finClos.apply(fut1.get(), null)) : new GridEmbeddedFuture<>(finClos, fut1);
} catch (GridClosureException e) {
return new GridFinishedFuture<>(e.unwrap());
} catch (IgniteCheckedException e) {
try {
return plc2.apply(false, e);
} catch (Exception e1) {
return new GridFinishedFuture<>(e1);
}
}
} else {
return new GridEmbeddedFuture<>(fut, plc2, finClos);
}
} else {
assert optimistic() || readCommitted() || skipVals;
if (!missed.isEmpty()) {
if (!readCommitted())
for (Iterator<KeyCacheObject> it = missed.keySet().iterator(); it.hasNext(); ) {
KeyCacheObject cacheKey = it.next();
K keyVal = (K) (keepCacheObjects ? cacheKey : cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false));
if (retMap.containsKey(keyVal))
it.remove();
}
if (missed.isEmpty())
return new GridFinishedFuture<>(retMap);
AffinityTopologyVersion topVer = topologyVersionSnapshot();
if (topVer == null)
topVer = entryTopVer;
return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc);
}
return new GridFinishedFuture<>(retMap);
}
} catch (IgniteCheckedException e) {
setRollbackOnly();
return new GridFinishedFuture<>(e);
}
}
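For context, a minimal public-API sketch of the call path that ends up in this method: a getAll() on a TRANSACTIONAL cache inside a pessimistic transaction first acquires key locks (txLockAsync above) and only then reads the values in postLock(). Cache name, keys and values below are illustrative, not taken from the snippet.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class TxGetAllSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("txCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, "one");
            cache.put(2, "two");

            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                // Pessimistic read: keys are locked before the values are returned.
                Map<Integer, String> vals = cache.getAll(new HashSet<>(Arrays.asList(1, 2)));

                System.out.println(vals);

                tx.commit();
            }
        }
    }
}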
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
Class GridNearTxLocal, method loadMissing:
/**
* @param cacheCtx Cache context.
* @param topVer Topology version.
* @param keys Keys to load.
* @param filter Filter.
* @param ret Return value.
* @param needReadVer Read version flag.
* @param singleRmv {@code True} for single remove operation.
* @param hasFilters {@code True} if filters are not empty.
* @param readThrough Read through flag.
* @param retval Return value flag.
* @param keepBinary Keep binary flag.
* @param recovery Recovery flag.
* @param expiryPlc Expiry policy.
* @return Load future.
*/
private IgniteInternalFuture<Void> loadMissing(final GridCacheContext cacheCtx, final AffinityTopologyVersion topVer, final Set<KeyCacheObject> keys, final CacheEntryPredicate[] filter, final GridCacheReturn ret, final boolean needReadVer, final boolean singleRmv, final boolean hasFilters, final boolean readThrough, final boolean retval, final boolean keepBinary, final boolean recovery, final ExpiryPolicy expiryPlc) {
GridInClosure3<KeyCacheObject, Object, GridCacheVersion> c = new GridInClosure3<KeyCacheObject, Object, GridCacheVersion>() {
@Override
public void apply(KeyCacheObject key, @Nullable Object val, @Nullable GridCacheVersion loadVer) {
if (log.isDebugEnabled())
log.debug("Loaded value from remote node [key=" + key + ", val=" + val + ']');
IgniteTxEntry e = entry(new IgniteTxKey(key, cacheCtx.cacheId()));
assert e != null;
if (needReadVer) {
assert loadVer != null;
e.entryReadVersion(singleRmv && val != null ? SER_READ_NOT_EMPTY_VER : loadVer);
}
if (singleRmv) {
assert !hasFilters && !retval;
assert val == null || Boolean.TRUE.equals(val) : val;
ret.set(cacheCtx, null, val != null, keepBinary);
} else {
CacheObject cacheVal = cacheCtx.toCacheObject(val);
if (e.op() == TRANSFORM) {
GridCacheVersion ver;
e.readValue(cacheVal);
try {
ver = e.cached().version();
} catch (GridCacheEntryRemovedException ex) {
assert optimistic() : e;
if (log.isDebugEnabled())
log.debug("Failed to get entry version: [msg=" + ex.getMessage() + ']');
ver = null;
}
addInvokeResult(e, cacheVal, ret, ver);
} else {
boolean success;
if (hasFilters) {
success = isAll(e.context(), key, cacheVal, filter);
if (!success)
e.value(cacheVal, false, false);
} else
success = true;
ret.set(cacheCtx, cacheVal, success, keepBinary);
}
}
}
};
return loadMissing(cacheCtx, topVer, readThrough, /*async*/true, keys, /*skipVals*/singleRmv, needReadVer, keepBinary, recovery, expiryPlc, c);
}
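In the closure above, the loaded value is matched back to its transaction entry through new IgniteTxKey(key, cacheCtx.cacheId()). A simplified, purely illustrative stand-in for that composite key is sketched below; the real IgniteTxKey also carries serialization support, and the field types here are placeholders.

// Illustrative stand-in for the (key, cacheId) composite: per-transaction entry maps are
// keyed by both the cache key and the cache ID, so entries from different caches
// enlisted in the same transaction never collide.
final class TxKeySketch {
    private final Object key;   // Stands in for KeyCacheObject.
    private final int cacheId;

    TxKeySketch(Object key, int cacheId) {
        this.key = key;
        this.cacheId = cacheId;
    }

    @Override public boolean equals(Object o) {
        if (this == o)
            return true;

        if (!(o instanceof TxKeySketch))
            return false;

        TxKeySketch other = (TxKeySketch)o;

        return cacheId == other.cacheId && key.equals(other.key);
    }

    @Override public int hashCode() {
        return 31 * key.hashCode() + cacheId;
    }
}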
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
Class GridNearLockFuture, method requestedKeys0:
/**
* @return Keys for which locks were requested from remote nodes but a response has not been received yet.
*/
private Set<IgniteTxKey> requestedKeys0() {
for (IgniteInternalFuture<Boolean> miniFut : futures()) {
if (isMini(miniFut) && !miniFut.isDone()) {
MiniFuture mini = (MiniFuture) miniFut;
Set<IgniteTxKey> requestedKeys = U.newHashSet(mini.keys.size());
for (KeyCacheObject key : mini.keys) requestedKeys.add(new IgniteTxKey(key, cctx.cacheId()));
return requestedKeys;
}
}
return null;
}
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
Class GridNearLockFuture, method map:
/**
* Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request per
* node, because that would not preserve the order of lock acquisition. Instead, keys are split into
* contiguous groups belonging to one primary node, and locks for these groups are acquired
* sequentially (a simplified grouping sketch follows this method).
*
* @param keys Keys.
* @param remap Remap flag.
* @param topLocked {@code True} if thread already acquired lock preventing topology change.
*/
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
try {
AffinityTopologyVersion topVer = this.topVer;
assert topVer != null;
assert topVer.topologyVersion() > 0 : topVer;
if (CU.affinityNodes(cctx, topVer).isEmpty()) {
onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all " + "partition nodes left the grid)."));
return;
}
boolean clientNode = cctx.kernalContext().clientNode();
assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
synchronized (this) {
mappings = new ArrayDeque<>();
// Assign keys to primary nodes.
GridNearLockMapping map = null;
for (KeyCacheObject key : keys) {
GridNearLockMapping updated = map(key, map, topVer);
// If new mapping was created, add to collection.
if (updated != map) {
mappings.add(updated);
if (tx != null && updated.node().isLocal())
tx.nearLocallyMapped(true);
}
map = updated;
}
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done: " + this);
return;
}
if (log.isDebugEnabled())
log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
boolean first = true;
// Create mini futures.
for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
GridNearLockMapping mapping = iter.next();
ClusterNode node = mapping.node();
Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
assert !mappedKeys.isEmpty();
GridNearLockRequest req = null;
Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
boolean explicit = false;
for (KeyCacheObject key : mappedKeys) {
IgniteTxKey txKey = cctx.txKey(key);
while (true) {
GridNearCacheEntry entry = null;
try {
entry = cctx.near().entryExx(key, topVer);
if (!cctx.isAll(entry, filter)) {
if (log.isDebugEnabled())
log.debug("Entry being locked did not pass filter (will not lock): " + entry);
onComplete(false, false);
return;
}
// Removed exception may be thrown here.
GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done after addEntry attempt " + "[fut=" + this + ", entry=" + entry + ']');
return;
}
if (cand != null) {
if (tx == null && !cand.reentry())
cctx.mvcc().addExplicitLock(threadId, cand, topVer);
IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();
if (val == null) {
GridDhtCacheEntry dhtEntry = dht().peekExx(key);
try {
if (dhtEntry != null)
val = dhtEntry.versionedValue(topVer);
} catch (GridCacheEntryRemovedException ignored) {
assert dhtEntry.obsolete() : dhtEntry;
if (log.isDebugEnabled())
log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
}
}
GridCacheVersion dhtVer = null;
if (val != null) {
dhtVer = val.get1();
valMap.put(key, val);
}
if (!cand.reentry()) {
if (req == null) {
boolean clientFirst = false;
if (first) {
clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
first = false;
}
assert !implicitTx() && !implicitSingleTx() : tx;
req = new GridNearLockRequest(cctx.cacheId(), topVer, cctx.nodeId(), threadId, futId, lockVer, inTx(), read, retval, isolation(), isInvalidate(), timeout, mappedKeys.size(), inTx() ? tx.size() : mappedKeys.size(), inTx() && tx.syncMode() == FULL_SYNC, inTx() ? tx.subjectId() : null, inTx() ? tx.taskNameHash() : 0, read ? createTtl : -1L, read ? accessTtl : -1L, skipStore, keepBinary, clientFirst, cctx.deploymentEnabled());
mapping.request(req);
}
distributedKeys.add(key);
if (tx != null)
tx.addKeyMapping(txKey, mapping.node());
// Include DHT version to match remote DHT entry.
req.addKeyBytes(key, retval && dhtVer == null, dhtVer, cctx);
}
if (cand.reentry())
explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
} else
// Ignore reentries within transactions.
explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
if (explicit)
tx.addKeyMapping(txKey, mapping.node());
break;
} catch (GridCacheEntryRemovedException ignored) {
assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;
if (log.isDebugEnabled())
log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
}
}
// Mark mapping explicit lock flag.
if (explicit) {
boolean marked = tx != null && tx.markExplicit(node.id());
assert tx == null || marked;
}
}
if (!distributedKeys.isEmpty())
mapping.distributedKeys(distributedKeys);
else {
assert mapping.request() == null;
iter.remove();
}
}
}
cctx.mvcc().recheckPendingLocks();
proceedMapping();
} catch (IgniteCheckedException ex) {
onError(ex);
}
}
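As the javadoc above notes, keys are not simply grouped per node: a new group is opened only when the primary node changes relative to the previous key, which keeps the original key order and lets per-group lock requests be sent strictly one after another. A simplified sketch of that grouping follows; primaryNodeOf is a hypothetical stand-in for the affinity lookup, not an Ignite API.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.Function;

// Sketch of the contiguous-grouping idea: groups preserve the original key order,
// so locks can be acquired group by group in a deterministic sequence.
final class LockGroupingSketch {
    static <K> Deque<List<K>> groupByPrimary(Iterable<K> keys, Function<K, String> primaryNodeOf) {
        Deque<List<K>> groups = new ArrayDeque<>();

        String prevNode = null;

        for (K key : keys) {
            String node = primaryNodeOf.apply(key);

            if (!node.equals(prevNode)) {
                // Primary node changed: open a new contiguous group.
                groups.add(new ArrayList<>());

                prevNode = node;
            }

            groups.peekLast().add(key);
        }

        return groups;
    }
}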
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey in project ignite by apache.
Class GridNearTransactionalCache, method startRemoteTx:
/**
* @param nodeId Primary node ID.
* @param req Request.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
* @throws GridDistributedLockCancelledException If lock has been cancelled.
*/
@SuppressWarnings({ "RedundantTypeArguments", "ForLoopReplaceableByForEach" })
@Nullable
public GridNearTxRemote startRemoteTx(UUID nodeId, GridDhtLockRequest req) throws IgniteCheckedException, GridDistributedLockCancelledException {
List<KeyCacheObject> nearKeys = req.nearKeys();
GridNearTxRemote tx = null;
ClassLoader ldr = ctx.deploy().globalLoader();
if (ldr != null) {
Collection<IgniteTxKey> evicted = null;
// Avoid iterator creation.
for (int i = 0; i < nearKeys.size(); i++) {
KeyCacheObject key = nearKeys.get(i);
if (key == null)
continue;
IgniteTxKey txKey = ctx.txKey(key);
if (log.isDebugEnabled())
log.debug("Unmarshalled key: " + key);
GridNearCacheEntry entry = null;
while (true) {
try {
entry = peekExx(key);
if (entry != null) {
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
tx = ctx.tm().nearTx(req.version());
if (tx == null) {
tx = new GridNearTxRemote(ctx.shared(), req.topologyVersion(), nodeId, req.nearNodeId(), req.nearXidVersion(), req.version(), null, ctx.systemTx(), ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.isInvalidate(), req.timeout(), req.txSize(), req.subjectId(), req.taskNameHash());
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !ctx.tm().onStarted(tx))
throw new IgniteTxRollbackCheckedException("Failed to acquire lock " + "(transaction has been completed): " + req.version());
}
tx.addEntry(ctx, txKey, GridCacheOperation.NOOP, null /*Value.*/, null /*dr version*/, req.skipStore(), req.keepBinary());
}
// Add remote candidate before reordering.
// Owned candidates should be reordered inside entry lock.
entry.addRemote(req.nodeId(), nodeId, req.threadId(), req.version(), tx != null, tx != null && tx.implicitSingle(), req.owned(entry.key()));
if (!req.inTx())
ctx.evicts().touch(entry, req.topologyVersion());
} else {
if (evicted == null)
evicted = new LinkedList<>();
evicted.add(txKey);
}
// Double-check in case the sender node left the grid.
if (ctx.discovery().node(req.nodeId()) == null) {
if (log.isDebugEnabled())
log.debug("Node requesting lock left grid (lock request will be ignored): " + req);
if (tx != null)
tx.rollbackRemoteTx();
return null;
}
// Entry is legit.
break;
} catch (GridCacheEntryRemovedException ignored) {
assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;
if (log.isDebugEnabled())
log.debug("Received entry removed exception (will retry on renewed entry): " + entry);
if (tx != null) {
tx.clearEntry(txKey);
if (log.isDebugEnabled())
log.debug("Cleared removed entry from remote transaction (will retry) [entry=" + entry + ", tx=" + tx + ']');
}
}
}
}
if (tx != null && evicted != null) {
assert !evicted.isEmpty();
for (IgniteTxKey evict : evicted) tx.addEvicted(evict);
}
} else {
String err = "Failed to acquire deployment class loader for message: " + req;
U.warn(log, err);
throw new IgniteCheckedException(err);
}
return tx;
}
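A pattern common to getAllAsync, map and startRemoteTx above is the while (true) retry around entry access: if the cached entry was concurrently removed (GridCacheEntryRemovedException), the entry reference is refreshed and the operation retried. Below is a generic, self-contained sketch of that loop; Entry, Attempt and the refresh supplier are hypothetical placeholders, not Ignite types.

import java.util.function.Supplier;

// Generic sketch of the retry-on-removed-entry loop used throughout the usages above.
final class EntryRetrySketch {
    /** Placeholder for a cache entry reference. */
    interface Entry { }

    /** Placeholder for Ignite's GridCacheEntryRemovedException. */
    static final class RemovedException extends Exception { }

    /** Operation attempted against a (possibly stale) entry reference. */
    interface Attempt<T> {
        T run(Entry entry) throws RemovedException;
    }

    static <T> T withRetry(Supplier<Entry> refresh, Attempt<T> attempt) {
        Entry entry = refresh.get();

        while (true) {
            try {
                return attempt.run(entry);
            }
            catch (RemovedException ignored) {
                // Entry was concurrently removed: obtain a fresh reference and retry.
                entry = refresh.get();
            }
        }
    }
}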