Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache: class GridDhtTxPrepareFuture, method prepare0.
/** */
private void prepare0() {
    try {
        if (tx.serializable() && tx.optimistic()) {
            IgniteCheckedException err0;

            try {
                err0 = checkReadConflict(req.writes());

                if (err0 == null)
                    err0 = checkReadConflict(req.reads());
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to check entry version: " + e, e);

                err0 = e;
            }

            if (err0 != null) {
                ERR_UPD.compareAndSet(this, null, err0);

                try {
                    tx.rollbackAsync();
                }
                catch (Throwable e) {
                    err0.addSuppressed(e);
                }

                // 'err' is the future's volatile error field, set just above via ERR_UPD.
                final GridNearTxPrepareResponse res = createPrepareResponse(err);

                onDone(res, res.error());

                return;
            }
        }

        onEntriesLocked();

        // We are holding transaction-level locks for entries here, so we can get next write version.
        tx.writeVersion(cctx.versions().next(tx.topologyVersion()));

        // Assign keys to primary nodes.
        if (!F.isEmpty(req.writes())) {
            for (IgniteTxEntry write : req.writes())
                map(tx.entry(write.txKey()));
        }

        if (!F.isEmpty(req.reads())) {
            for (IgniteTxEntry read : req.reads())
                map(tx.entry(read.txKey()));
        }

        if (isDone())
            return;

        if (last)
            sendPrepareRequests();
    }
    finally {
        markInitialized();
    }
}
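The method records only the first failure: ERR_UPD is an AtomicReferenceFieldUpdater over the future's error field, so a compareAndSet from null wins and later failures are attached as suppressed exceptions. A minimal self-contained sketch of that first-error-wins pattern in plain Java (the class and field names here are illustrative, not Ignite's):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

class PrepareFuture {
    /** Updater created inside the declaring class, so a private volatile field is accessible. */
    private static final AtomicReferenceFieldUpdater<PrepareFuture, Throwable> ERR_UPD =
        AtomicReferenceFieldUpdater.newUpdater(PrepareFuture.class, Throwable.class, "err");

    private volatile Throwable err;

    /** Records the first failure atomically; later failures become suppressed exceptions. */
    void onError(Throwable t) {
        if (!ERR_UPD.compareAndSet(this, null, t))
            err.addSuppressed(t);
    }

    Throwable error() {
        return err;
    }
}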
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache: class GridDhtTxPrepareFuture, method readyLocks.
/**
 * @param checkEntries Entries.
 */
private void readyLocks(Iterable<IgniteTxEntry> checkEntries) {
    for (IgniteTxEntry txEntry : checkEntries) {
        GridCacheContext cacheCtx = txEntry.context();

        if (cacheCtx.isLocal())
            continue;

        GridDistributedCacheEntry entry = (GridDistributedCacheEntry)txEntry.cached();

        if (entry == null) {
            entry = (GridDistributedCacheEntry)cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion());

            txEntry.cached(entry);
        }

        if (tx.optimistic() && txEntry.explicitVersion() == null) {
            synchronized (this) {
                lockKeys.add(txEntry.txKey());
            }
        }

        while (true) {
            try {
                assert txEntry.explicitVersion() == null || entry.lockedBy(txEntry.explicitVersion());

                CacheLockCandidates owners = entry.readyLock(tx.xidVersion());

                if (log.isDebugEnabled())
                    log.debug("Current lock owners for entry [owner=" + owners + ", entry=" + entry + ']');

                break; // While.
            }
            // Possible if entry cached within transaction is obsolete.
            catch (GridCacheEntryRemovedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry in future onAllReplies method (will retry): " + txEntry);

                entry = (GridDistributedCacheEntry)cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion());

                txEntry.cached(entry);
            }
        }
    }
}
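The while (true) / catch GridCacheEntryRemovedException shape above is a recurring Ignite idiom: a cached entry can become obsolete between lookup and use, in which case the code re-fetches a fresh entry and retries. A generic sketch of that retry loop (EntryStore, CacheEntry, and RemovedException are illustrative stand-ins, not Ignite classes):

interface CacheEntry {
    void readyLock() throws RemovedException;
}

interface EntryStore {
    CacheEntry entryEx(Object key);
}

class RemovedException extends Exception {
}

class LockReadier {
    /** Retries whenever a concurrently removed (obsolete) entry is hit. */
    static void readyLock(EntryStore store, Object key) {
        CacheEntry entry = store.entryEx(key);

        while (true) {
            try {
                entry.readyLock();

                break; // Success: leave the retry loop.
            }
            catch (RemovedException ignored) {
                // Entry became obsolete between lookup and locking: fetch a fresh instance and retry.
                entry = store.entryEx(key);
            }
        }
    }
}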
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache: class GridDhtTxRemote, method addWrite.
/**
 * @param cacheCtx Cache context.
 * @param op Write operation.
 * @param key Key to add to write set.
 * @param val Value.
 * @param entryProcessors Entry processors.
 * @param ttl TTL.
 * @param skipStore Skip store flag.
 * @param keepBinary Keep binary flag.
 */
public void addWrite(GridCacheContext cacheCtx,
    GridCacheOperation op,
    IgniteTxKey key,
    @Nullable CacheObject val,
    @Nullable Collection<T2<EntryProcessor<Object, Object, Object>, Object[]>> entryProcessors,
    long ttl,
    boolean skipStore,
    boolean keepBinary) {
    checkInternal(key);

    if (isSystemInvalidate())
        return;

    GridDhtCacheEntry cached = cacheCtx.dht().entryExx(key.key(), topologyVersion());

    IgniteTxEntry txEntry = new IgniteTxEntry(cacheCtx, this, op, val, ttl, -1L, cached, null, skipStore, keepBinary);

    txEntry.entryProcessors(entryProcessors);

    txState.addWriteEntry(key, txEntry);
}
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache: class GridDhtColocatedLockFuture, method addEntry.
/**
 * Adds entry to future.
 *
 * @param entry Entry to add.
 * @return Non-reentry candidate if the lock should be acquired on a remote node,
 *      a reentry candidate if the lock has already been acquired, and {@code null} if an explicit lock
 *      is held and an implicit transaction accesses the locked entry.
 * @throws IgniteCheckedException If failed to add entry due to external locking.
 */
@Nullable private GridCacheMvccCandidate addEntry(GridDistributedCacheEntry entry) throws IgniteCheckedException {
    IgniteTxKey txKey = entry.txKey();

    GridCacheMvccCandidate cand = cctx.mvcc().explicitLock(threadId, txKey);

    if (inTx()) {
        if (cand != null) {
            if (!tx.implicit())
                throw new IgniteCheckedException("Cannot access key within transaction if lock is " +
                    "externally held [key=" + entry.key() + ", entry=" + entry + ']');
            else
                return null;
        }
        else {
            IgniteTxEntry txEntry = tx.entry(txKey);

            assert txEntry != null;

            txEntry.cached(entry);

            // Check transaction entries (corresponding tx entries must be enlisted in transaction).
            cand = new GridCacheMvccCandidate(entry, cctx.localNodeId(), null, null, threadId, lockVer,
                true, txEntry.locked(), inTx(), inTx() && tx.implicitSingle(), false, false, null, false);

            cand.topologyVersion(topVer);
        }
    }
    else {
        if (cand == null) {
            cand = new GridCacheMvccCandidate(entry, cctx.localNodeId(), null, null, threadId, lockVer,
                true, false, inTx(), inTx() && tx.implicitSingle(), false, false, null, false);

            cand.topologyVersion(topVer);
        }
        else
            cand = cand.reenter();

        cctx.mvcc().addExplicitLock(threadId, cand, topVer);
    }

    return cand;
}
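The inTx() branch encodes a rule worth seeing from the user side: a key locked through the explicit lock API can still be touched by an implicit single-operation transaction (the return null branch), but an explicit transaction on the same key is rejected with the "externally held" error. A hedged public-API illustration; the exact exception type surfaced to user code may be wrapped, and the key and values are arbitrary:

import java.util.concurrent.locks.Lock;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.transactions.Transaction;

class ExplicitLockExample {
    /** Explicit lock first, explicit transaction second: the transactional access is rejected. */
    static void explicitLockThenTx(Ignite ignite, IgniteCache<Integer, String> cache) {
        Lock lock = cache.lock(1);

        lock.lock();

        try {
            cache.put(1, "ok"); // Implicit tx: allowed to run under the held explicit lock.

            try (Transaction tx = ignite.transactions().txStart()) {
                cache.put(1, "rejected"); // Explicit tx on an externally locked key: throws.

                tx.commit();
            }
        }
        finally {
            lock.unlock();
        }
    }
}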
Use of org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry in project ignite by apache: class GridDhtLockFuture, method map.
/**
 * @param entries Entries.
 */
private void map(Iterable<GridDhtCacheEntry> entries) {
    synchronized (this) {
        if (mapped)
            return;

        mapped = true;
    }

    try {
        if (log.isDebugEnabled())
            log.debug("Mapping entry for DHT lock future: " + this);

        // Assign keys to primary nodes.
        for (GridDhtCacheEntry entry : entries) {
            try {
                while (true) {
                    try {
                        cctx.dhtMap(nearNodeId, topVer, entry, tx == null ? lockVer : null, log, dhtMap, null);

                        GridCacheMvccCandidate cand = entry.candidate(lockVer);

                        // Possible in case of lock cancellation.
                        if (cand == null) {
                            onFailed(false);

                            // Will mark initialized in finally block.
                            return;
                        }

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry when mapping DHT lock future (will retry): " + entry);

                        entry = cctx.dht().entryExx(entry.key(), topVer);
                    }
                }
            }
            catch (GridDhtInvalidPartitionException e) {
                assert false : "DHT lock should never get invalid partition [err=" + e + ", fut=" + this + ']';
            }
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Mapping won't proceed because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Mapped DHT lock future [dhtMap=" + F.nodeIds(dhtMap.keySet()) + ", dhtLockFut=" + this + ']');

        long timeout = inTx() ? tx.remainingTime() : this.timeout;

        // Create mini futures.
        for (Map.Entry<ClusterNode, List<GridDhtCacheEntry>> mapped : dhtMap.entrySet()) {
            ClusterNode n = mapped.getKey();

            List<GridDhtCacheEntry> dhtMapping = mapped.getValue();

            int cnt = F.size(dhtMapping);

            if (cnt > 0) {
                assert !n.id().equals(cctx.localNodeId());

                // Transaction timed out (remainingTime() returns -1).
                if (inTx() && tx.remainingTime() == -1)
                    return;

                MiniFuture fut = new MiniFuture(n, dhtMapping);

                GridDhtLockRequest req = new GridDhtLockRequest(cctx.cacheId(), nearNodeId,
                    inTx() ? tx.nearXidVersion() : null, threadId, futId, fut.futureId(), lockVer, topVer,
                    inTx(), read, isolation(), isInvalidate(), timeout, cnt, 0, inTx() ? tx.size() : cnt,
                    inTx() ? tx.subjectId() : null, inTx() ? tx.taskNameHash() : 0, read ? accessTtl : -1L,
                    skipStore, cctx.store().configured(), keepBinary, cctx.deploymentEnabled());

                try {
                    for (ListIterator<GridDhtCacheEntry> it = dhtMapping.listIterator(); it.hasNext(); ) {
                        GridDhtCacheEntry e = it.next();

                        boolean needVal = false;

                        try {
                            // Must unswap entry so that isNewLocked returns correct value.
                            e.unswap(false);

                            needVal = e.isNewLocked();

                            if (needVal) {
                                List<ClusterNode> owners = cctx.topology().owners(e.partition(),
                                    tx != null ? tx.topologyVersion() : cctx.affinity().affinityTopologyVersion());

                                // Do not preload if local node is partition owner.
                                if (owners.contains(cctx.localNode()))
                                    needVal = false;
                            }
                        }
                        catch (GridCacheEntryRemovedException ex) {
                            assert false : "Entry cannot become obsolete when DHT local candidate is added " +
                                "[e=" + e + ", ex=" + ex + ']';
                        }

                        // Skip entry if it is not new and is not present in updated mapping.
                        if (tx != null && !needVal)
                            continue;

                        boolean invalidateRdr = e.readerId(n.id()) != null;

                        req.addDhtKey(e.key(), invalidateRdr, cctx);

                        if (needVal) {
                            // Mark last added key as needed to be preloaded.
                            req.markLastKeyForPreload();

                            if (tx != null) {
                                IgniteTxEntry txEntry = tx.entry(e.txKey());

                                // NOOP entries will be sent to backups on prepare step.
                                if (txEntry.op() == GridCacheOperation.READ)
                                    txEntry.op(GridCacheOperation.NOOP);
                            }
                        }

                        it.set(addOwned(req, e));
                    }

                    if (!F.isEmpty(req.keys())) {
                        if (tx != null)
                            tx.addLockTransactionNode(n);

                        // Append new future.
                        add(fut);

                        cctx.io().send(n, req, cctx.ioPolicy());

                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("DHT lock fut, sent request [txId=" + nearLockVer +
                                ", dhtTxId=" + lockVer +
                                ", inTx=" + inTx() +
                                ", nodeId=" + n.id() + ']');
                        }
                    }
                }
                catch (IgniteCheckedException e) {
                    // Fail the whole thing.
                    if (e instanceof ClusterTopologyCheckedException)
                        fut.onResult();
                    else {
                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("DHT lock fut, failed to send request [txId=" + nearLockVer +
                                ", dhtTxId=" + lockVer +
                                ", inTx=" + inTx() +
                                ", node=" + n.id() +
                                ", err=" + e + ']');
                        }

                        fut.onResult(e);
                    }
                }
            }
        }
    }
    finally {
        markInitialized();
    }
}
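Structurally, map() is a fan-out: for each remote node with mapped entries it creates one MiniFuture, registers it on the compound lock future with add(fut), and only then sends the request, so the compound future completes when every per-node reply (or failure) has arrived. A generic sketch of that mini-future pattern using CompletableFuture (all types here are illustrative, not Ignite's):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

class CompoundLockFuture {
    private final List<CompletableFuture<Void>> minis = new ArrayList<>();

    /** Creates one mini future per node mapping and sends the corresponding request. */
    <N, E> CompletableFuture<Void> mapAndSend(Map<N, List<E>> mapping, Sender<N, E> sender) {
        for (Map.Entry<N, List<E>> e : mapping.entrySet()) {
            if (e.getValue().isEmpty())
                continue; // Nothing to request from this node.

            CompletableFuture<Void> mini = new CompletableFuture<>();

            minis.add(mini); // Register before sending so a fast reply cannot be lost.

            sender.send(e.getKey(), e.getValue(), mini);
        }

        // The compound future completes once all mini futures complete.
        return CompletableFuture.allOf(minis.toArray(new CompletableFuture[0]));
    }

    interface Sender<N, E> {
        void send(N node, List<E> entries, CompletableFuture<Void> mini);
    }
}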