Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
Class GridDhtTxLocalAdapter, method lockAllAsync.
/**
 * @param cacheCtx Cache context.
 * @param entries Entries to lock.
 * @param msgId Message ID.
 * @param read Read flag.
 * @param needRetVal Return value flag.
 * @param createTtl TTL for create operation.
 * @param accessTtl TTL for read operation.
 * @param skipStore Skip store flag.
 * @param keepBinary Keep binary flag.
 * @return Lock future.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
IgniteInternalFuture<GridCacheReturn> lockAllAsync(
    GridCacheContext cacheCtx,
    List<GridCacheEntryEx> entries,
    long msgId,
    final boolean read,
    final boolean needRetVal,
    long createTtl,
    long accessTtl,
    boolean skipStore,
    boolean keepBinary
) {
    try {
        checkValid();
    } catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }
    final GridCacheReturn ret = new GridCacheReturn(localResult(), false);
    if (F.isEmpty(entries))
        return new GridFinishedFuture<>(ret);
    init();
    onePhaseCommit(onePhaseCommit);
    try {
        Set<KeyCacheObject> skipped = null;
        AffinityTopologyVersion topVer = topologyVersion();
        GridDhtCacheAdapter dhtCache = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();
        // Enlist locks into transaction.
        for (int i = 0; i < entries.size(); i++) {
            GridCacheEntryEx entry = entries.get(i);
            KeyCacheObject key = entry.key();
            IgniteTxEntry txEntry = entry(entry.txKey());
            // First time access.
            if (txEntry == null) {
                GridDhtCacheEntry cached;
                while (true) {
                    try {
                        cached = dhtCache.entryExx(key, topVer);
                        cached.unswap(read);
                        break;
                    } catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry: " + key);
                    }
                }
                addActiveCache(dhtCache.context(), false);
                txEntry = addEntry(NOOP, null, null, null, cached, null, CU.empty0(), false, -1L, -1L, null,
                    skipStore, keepBinary);
                if (read)
                    txEntry.ttl(accessTtl);
                txEntry.cached(cached);
                addReader(msgId, cached, txEntry, topVer);
            } else {
                if (skipped == null)
                    skipped = new GridLeanSet<>();
                skipped.add(key);
            }
        }
        assert pessimistic();
        Collection<KeyCacheObject> keys = F.viewReadOnly(entries, CU.entry2Key());
        // Acquire locks only after having added operation to the write set.
        // Otherwise, during rollback we will not know whether locks need
        // to be rolled back.
        // Lose all skipped and previously locked keys (we cannot reenter locks here).
        final Collection<KeyCacheObject> passedKeys = skipped != null ? F.view(keys, F0.notIn(skipped)) : keys;
        if (log.isDebugEnabled())
            log.debug("Lock keys: " + passedKeys);
        return obtainLockAsync(cacheCtx, ret, passedKeys, read, needRetVal, createTtl, accessTtl, skipStore, keepBinary);
    } catch (IgniteCheckedException e) {
        setRollbackOnly();
        return new GridFinishedFuture<>(e);
    }
}
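The entryExx/unswap loop above shows a pattern that recurs in all of these snippets: an entry obtained from the cache may be removed concurrently, signalled by GridCacheEntryRemovedException, in which case the lookup is simply repeated. Below is a minimal, self-contained sketch of that retry idiom; the Entry type, EntryRemovedException, and the lookup supplier are hypothetical stand-ins, not the real Ignite internals.

import java.util.function.Supplier;

/** Sketch of the retry-on-removed-entry idiom used in lockAllAsync. */
final class RetryOnRemovedSketch {
    static class EntryRemovedException extends Exception { }

    interface Entry {
        /** Loads the entry's value; fails if the entry was removed concurrently. */
        void unswap(boolean read) throws EntryRemovedException;
    }

    /** Re-resolves the entry until an attempt completes without observing a removal. */
    static Entry resolve(Supplier<Entry> lookup, boolean read) {
        while (true) {
            Entry cached = lookup.get();
            try {
                cached.unswap(read);
                return cached;
            } catch (EntryRemovedException ignore) {
                // The entry was removed between lookup and use; look it up again.
            }
        }
    }
}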
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
Class GridDhtTxLocalAdapter, method addMapping.
/**
* @param mappings Entry mappings.
* @param dst Transaction mappings.
*/
private void addMapping(Map<ClusterNode, List<GridDhtCacheEntry>> mappings, Map<UUID, GridDistributedTxMapping> dst) {
    for (Map.Entry<ClusterNode, List<GridDhtCacheEntry>> mapping : mappings.entrySet()) {
        ClusterNode n = mapping.getKey();
        GridDistributedTxMapping m = dst.get(n.id());
        List<GridDhtCacheEntry> entries = mapping.getValue();
        for (GridDhtCacheEntry entry : entries) {
            IgniteTxEntry txEntry = entry(entry.txKey());
            if (txEntry != null) {
                if (m == null)
                    dst.put(n.id(), m = new GridDistributedTxMapping(n));
                m.add(txEntry);
            }
        }
    }
}
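addMapping re-keys the per-node entry lists by node ID, creating the per-node GridDistributedTxMapping lazily the first time an enlisted entry is seen for that node. A simplified sketch of the same grouping with plain collections follows; NodeId, TxEntry, and Mapping are hypothetical stand-ins, and the enlistment check (entry(entry.txKey()) != null) is reduced to a comment.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;

/** Sketch of grouping already-enlisted entries by destination node. */
final class TxMappingSketch {
    record NodeId(UUID id) { }
    record TxEntry(String key) { }

    static final class Mapping {
        final NodeId node;
        final List<TxEntry> entries = new ArrayList<>();

        Mapping(NodeId node) { this.node = node; }

        void add(TxEntry e) { entries.add(e); }
    }

    static void addMapping(Map<NodeId, List<TxEntry>> perNode, Map<UUID, Mapping> dst) {
        for (Map.Entry<NodeId, List<TxEntry>> e : perNode.entrySet()) {
            NodeId n = e.getKey();
            Mapping m = dst.get(n.id());
            for (TxEntry entry : e.getValue()) {
                // The real code only maps entries that are actually enlisted in the transaction.
                if (m == null)
                    dst.put(n.id(), m = new Mapping(n));
                m.add(entry);
            }
        }
    }
}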
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
Class GridDhtTxPrepareFuture, method addDhtValues.
/**
* @param res Response being sent.
*/
private void addDhtValues(GridNearTxPrepareResponse res) {
    // Interceptor on near node needs old values to execute callbacks.
    if (!F.isEmpty(writes)) {
        for (IgniteTxEntry e : writes) {
            IgniteTxEntry txEntry = tx.entry(e.txKey());
            assert txEntry != null : "Missing tx entry for key [tx=" + tx + ", key=" + e.txKey() + ']';
            GridCacheContext cacheCtx = txEntry.context();
            while (true) {
                try {
                    GridCacheEntryEx entry = txEntry.cached();
                    GridCacheVersion dhtVer = entry.version();
                    CacheObject val0 = entry.valueBytes();
                    if (val0 != null)
                        res.addOwnedValue(txEntry.txKey(), dhtVer, val0);
                    break;
                } catch (GridCacheEntryRemovedException ignored) {
                    // Retry.
                    txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion()));
                }
            }
        }
    }
    for (Map.Entry<IgniteTxKey, GridCacheVersion> ver : dhtVerMap.entrySet()) {
        IgniteTxEntry txEntry = tx.entry(ver.getKey());
        if (res.hasOwnedValue(ver.getKey()))
            continue;
        assert txEntry != null : ver;
        GridCacheContext cacheCtx = txEntry.context();
        while (true) {
            try {
                GridCacheEntryEx entry = txEntry.cached();
                GridCacheVersion dhtVer = entry.version();
                if (ver.getValue() == null || !ver.getValue().equals(dhtVer)) {
                    CacheObject val0 = entry.valueBytes();
                    res.addOwnedValue(txEntry.txKey(), dhtVer, val0);
                }
                break;
            } catch (GridCacheEntryRemovedException ignored) {
                // Retry.
                txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion()));
            }
        }
    }
}
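The second loop above only attaches a value when the DHT version known to the near node no longer matches the entry's current version, so unchanged entries are not resent. A simplified sketch of that version comparison with hypothetical Version and Value types (the retry-on-removed-entry handling is omitted):

import java.util.HashMap;
import java.util.Map;

/** Sketch of the "attach value only if the DHT version changed" check in addDhtValues. */
final class OwnedValuesSketch {
    record Version(long order) { }
    record Value(byte[] bytes) { }

    static Map<String, Value> collectChanged(Map<String, Version> nearVersions,
        Map<String, Version> currentVersions, Map<String, Value> currentValues) {
        Map<String, Value> owned = new HashMap<>();
        for (Map.Entry<String, Version> e : nearVersions.entrySet()) {
            Version cur = currentVersions.get(e.getKey());
            if (cur == null)
                continue; // No current entry for this key; nothing to attach.
            // Attach the value only when the near node's version is missing or stale.
            if (e.getValue() == null || !e.getValue().equals(cur))
                owned.put(e.getKey(), currentValues.get(e.getKey()));
        }
        return owned;
    }
}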
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
Class GridDistributedTxRemoteAdapter, method setPartitionUpdateCounters.
/** {@inheritDoc} */
@Override public void setPartitionUpdateCounters(long[] cntrs) {
    if (writeMap() != null && !writeMap().isEmpty() && cntrs != null && cntrs.length > 0) {
        int i = 0;
        for (IgniteTxEntry txEntry : writeMap().values()) {
            txEntry.updateCounter(cntrs[i]);
            ++i;
        }
    }
}
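The counters array is applied positionally: element i is assigned to the i-th entry in the write map's iteration order, so both sides must agree on that order, and the method assumes cntrs has at least as many elements as the write map. A small sketch of the same positional assignment with a defensive bounds check; TxEntry here is a hypothetical stand-in.

import java.util.Collection;

/** Sketch of positionally applying partition update counters to write entries. */
final class UpdateCountersSketch {
    static final class TxEntry {
        long updateCntr;

        void updateCounter(long cntr) { updateCntr = cntr; }
    }

    static void setPartitionUpdateCounters(Collection<TxEntry> writeEntries, long[] cntrs) {
        if (writeEntries == null || writeEntries.isEmpty() || cntrs == null || cntrs.length == 0)
            return;
        int i = 0;
        for (TxEntry e : writeEntries) {
            if (i >= cntrs.length)
                break; // Defensive: the real code assumes the sizes match.
            e.updateCounter(cntrs[i++]);
        }
    }
}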
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache.
Class GridDhtColocatedLockFuture, method map0.
/**
* @param keys Keys to map.
* @param remap Remap flag.
* @param topLocked Topology locked flag.
* @throws IgniteCheckedException If mapping failed.
*/
private synchronized void map0(Collection<KeyCacheObject> keys, boolean remap, boolean topLocked) throws IgniteCheckedException {
    AffinityTopologyVersion topVer = this.topVer;
    assert topVer != null;
    assert topVer.topologyVersion() > 0;
    if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
            "(all partition nodes left the grid): " + cctx.name()));
        return;
    }
    boolean clientNode = cctx.kernalContext().clientNode();
    assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
    // First assume this node is primary for all keys passed in.
    if (!clientNode && mapAsPrimary(keys, topVer))
        return;
    mappings = new ArrayDeque<>();
    // Assign keys to primary nodes.
    GridNearLockMapping map = null;
    for (KeyCacheObject key : keys) {
        GridNearLockMapping updated = map(key, map, topVer);
        // If new mapping was created, add to collection.
        if (updated != map) {
            mappings.add(updated);
            if (tx != null && updated.node().isLocal())
                tx.colocatedLocallyMapped(true);
        }
        map = updated;
    }
    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Abandoning (re)map because future is done: " + this);
        return;
    }
    if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
    boolean hasRmtNodes = false;
    boolean first = true;
    // Create mini futures.
    for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping mapping = iter.next();
        ClusterNode node = mapping.node();
        Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
        boolean loc = node.equals(cctx.localNode());
        assert !mappedKeys.isEmpty();
        GridNearLockRequest req = null;
        Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
        for (KeyCacheObject key : mappedKeys) {
            IgniteTxKey txKey = cctx.txKey(key);
            GridDistributedCacheEntry entry = null;
            if (tx != null) {
                IgniteTxEntry txEntry = tx.entry(txKey);
                if (txEntry != null) {
                    entry = (GridDistributedCacheEntry)txEntry.cached();
                    if (entry != null && loc == entry.detached()) {
                        entry = cctx.colocated().entryExx(key, topVer, true);
                        txEntry.cached(entry);
                    }
                }
            }
            boolean explicit;
            while (true) {
                try {
                    if (entry == null)
                        entry = cctx.colocated().entryExx(key, topVer, true);
                    if (!cctx.isAll(entry, filter)) {
                        if (log.isDebugEnabled())
                            log.debug("Entry being locked did not pass filter (will not lock): " + entry);
                        onComplete(false, false);
                        return;
                    }
                    assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';
                    GridCacheMvccCandidate cand = addEntry(entry);
                    // Will either return value from dht cache or null if this is a miss.
                    IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null :
                        ((GridDhtCacheEntry)entry).versionedValue(topVer);
                    GridCacheVersion dhtVer = null;
                    if (val != null) {
                        dhtVer = val.get1();
                        valMap.put(key, val);
                    }
                    if (cand != null && !cand.reentry()) {
                        if (req == null) {
                            boolean clientFirst = false;
                            if (first) {
                                clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
                                first = false;
                            }
                            assert !implicitTx() && !implicitSingleTx() : tx;
                            req = new GridNearLockRequest(
                                cctx.cacheId(),
                                topVer,
                                cctx.nodeId(),
                                threadId,
                                futId,
                                lockVer,
                                inTx(),
                                read,
                                retval,
                                isolation(),
                                isInvalidate(),
                                timeout,
                                mappedKeys.size(),
                                inTx() ? tx.size() : mappedKeys.size(),
                                inTx() && tx.syncMode() == FULL_SYNC,
                                inTx() ? tx.subjectId() : null,
                                inTx() ? tx.taskNameHash() : 0,
                                read ? createTtl : -1L,
                                read ? accessTtl : -1L,
                                skipStore,
                                keepBinary,
                                clientFirst,
                                cctx.deploymentEnabled());
                            mapping.request(req);
                        }
                        distributedKeys.add(key);
                        if (tx != null)
                            tx.addKeyMapping(txKey, mapping.node());
                        req.addKeyBytes(key,
                            retval,
                            // Include DHT version to match remote DHT entry.
                            dhtVer,
                            cctx);
                    }
                    explicit = inTx() && cand == null;
                    if (explicit)
                        tx.addKeyMapping(txKey, mapping.node());
                    break;
                } catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    entry = null;
                }
            }
            // Mark mapping explicit lock flag.
            if (explicit) {
                boolean marked = tx != null && tx.markExplicit(node.id());
                assert tx == null || marked;
            }
        }
        if (!distributedKeys.isEmpty()) {
            mapping.distributedKeys(distributedKeys);
            hasRmtNodes |= !mapping.node().isLocal();
        } else {
            assert mapping.request() == null;
            iter.remove();
        }
    }
    if (hasRmtNodes) {
        trackable = true;
        if (!remap && !cctx.mvcc().addFuture(this))
            throw new IllegalStateException("Duplicate future ID: " + this);
    } else
        trackable = false;
    proceedMapping();
}
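The "assign keys to primary nodes" loop in map0 reuses the current mapping while consecutive keys resolve to the same primary node and starts a new mapping when the node changes, which is why the mappings collection is an ordered deque. Below is a minimal sketch of that grouping, under the assumption that map(key, map, topVer) behaves this way; Node, Mapping, and the primary function are hypothetical stand-ins.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.Function;

/** Sketch of grouping consecutive keys by their primary node, as in map0. */
final class KeyMappingSketch {
    record Node(String id) { }

    static final class Mapping {
        final Node node;
        final List<String> keys = new ArrayList<>();

        Mapping(Node node) { this.node = node; }
    }

    static Deque<Mapping> mapKeys(List<String> keys, Function<String, Node> primary) {
        Deque<Mapping> mappings = new ArrayDeque<>();
        Mapping cur = null;
        for (String key : keys) {
            Node node = primary.apply(key);
            // Start a new mapping only when the primary node changes.
            if (cur == null || !cur.node.equals(node)) {
                cur = new Mapping(node);
                mappings.add(cur);
            }
            cur.keys.add(key);
        }
        return mappings;
    }
}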