Use of org.apache.ignite.internal.processors.cache.mvcc.txlog.TxKey in the Apache Ignite project.
Class MvccProcessorImpl, method waitForLock:
/**
 * {@inheritDoc}
 *
 * Registers {@code waiterVer} as a waiter on the lock held by the transaction identified by
 * {@code blockerVer}, returning a future that completes when the lock becomes available.
 * May also schedule a delayed deadlock-detection computation while the wait is in progress.
 */
@Override
public IgniteInternalFuture<Void> waitForLock(GridCacheContext cctx, MvccVersion waiterVer, MvccVersion blockerVer) {
    // Key identifying the blocking transaction in the wait map.
    TxKey key = new TxKey(blockerVer.coordinatorVersion(), blockerVer.counter());

    LockFuture fut = new LockFuture(cctx.ioPolicy(), waiterVer);

    // Atomically chain this waiter onto any waiters already registered for the same blocker.
    Waiter waiter = waitMap.merge(key, fut, Waiter::concat);

    // If the merged chain contains no local (blocker) transaction — presumably the blocker has
    // already finished — remove the entry and run the waiters immediately rather than leaving
    // them parked in the map. NOTE(review): the merge-then-remove ordering appears deliberate
    // to avoid losing waiters racing with tx completion; do not reorder.
    if (!waiter.hasLocalTransaction() && (waiter = waitMap.remove(key)) != null)
        waiter.run(ctx);
    else {
        // Blocker still active: kick off delayed deadlock detection, and cancel it as soon as
        // the lock future completes (i.e. the wait resolved without a deadlock).
        DeadlockDetectionManager.DelayedDeadlockComputation delayedComputation = ctx.cache().context().deadlockDetectionMgr().initDelayedComputation(waiterVer, blockerVer);

        if (delayedComputation != null)
            fut.listen(fut0 -> delayedComputation.cancel());
    }

    return fut;
}
Use of org.apache.ignite.internal.processors.cache.mvcc.txlog.TxKey in the Apache Ignite project.
Class MvccCachingManager, method onTxFinished:
/**
 * Flushes the entries cached for a finished transaction to continuous-query listeners and
 * data-center replication, assigning final partition update counters along the way.
 *
 * @param tx Transaction.
 * @param commit {@code True} if commit.
 * @throws IgniteCheckedException If failed.
 */
public void onTxFinished(IgniteInternalTx tx, boolean commit) throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Transaction finished: [commit=" + commit + ", tx=" + tx + ']');

    // System/internal transactions and non-mvcc transactions never have cached entries.
    if (tx.system() || tx.internal() || tx.mvccSnapshot() == null)
        return;

    // Drop the per-tx entry counter used to enforce TX_SIZE_THRESHOLD in addEnlisted().
    cntrs.remove(new TxKey(tx.mvccSnapshot().coordinatorVersion(), tx.mvccSnapshot().counter()));

    // Detach the enlisted-entry buffer for this tx; null if nothing was cached.
    EnlistBuffer buf = enlistCache.remove(tx.xidVersion());

    Map<Integer, Map<KeyCacheObject, MvccTxEntry>> allCached = buf == null ? null : buf.getCached();

    TxCounters txCntrs = tx.txCounters(false);

    Collection<PartitionUpdateCountersMessage> cntrsColl = txCntrs == null ? null : txCntrs.updateCounters();

    // No update counters means no updates were made — nothing to feed to CQ/DR.
    if (txCntrs == null || F.isEmpty(cntrsColl))
        return;

    GridIntList cacheIds = tx.txState().cacheIds();

    assert cacheIds != null;

    for (int i = 0; i < cacheIds.size(); i++) {
        int cacheId = cacheIds.get(i);

        GridCacheContext ctx0 = cctx.cacheContext(cacheId);

        assert ctx0 != null;

        // Hold the listener read lock so the CQ listener set stays stable while we notify.
        ctx0.group().listenerLock().readLock().lock();

        try {
            boolean hasListeners = ctx0.hasContinuousQueryListeners(tx);
            boolean drEnabled = ctx0.isDrEnabled();

            if (!hasListeners && !drEnabled)
                // There are no listeners to notify.
                continue;

            // Get cached entries for the given cache.
            Map<KeyCacheObject, MvccTxEntry> cached = allCached == null ? null : allCached.get(cacheId);

            Map<Integer, Map<Integer, T2<AtomicLong, Long>>> cntrsMap = countersPerPartition(cntrsColl);

            Map<Integer, T2<AtomicLong, Long>> cntrPerCache = cntrsMap.get(cacheId);

            if (F.isEmpty(cntrPerCache))
                // No updates were made for this cache.
                continue;

            boolean fakeEntries = false;

            if (F.isEmpty(cached)) {
                if (log.isDebugEnabled())
                    log.debug("Transaction updates were not cached fully (this can happen when listener started" + " during the transaction execution). [tx=" + tx + ']');

                if (hasListeners) {
                    // Create fake update entries if we have CQ listeners.
                    cached = createFakeCachedEntries(cntrPerCache, tx, cacheId);

                    fakeEntries = true;
                }
                else
                    // Nothing to do further if tx is not cached entirely and there are no any CQ listeners.
                    continue;
            }

            if (F.isEmpty(cached))
                continue;

            // Feed CQ & DR with entries.
            for (Map.Entry<KeyCacheObject, MvccTxEntry> entry : cached.entrySet()) {
                MvccTxEntry e = entry.getValue();

                assert e.key().partition() != -1;
                assert cntrPerCache != null;
                assert e.cacheId() == cacheId;

                // Assign the next final update counter for the entry's partition;
                // the T2 pair is (running counter, upper bound) per partition.
                T2<AtomicLong, Long> cntr = cntrPerCache.get(e.key().partition());

                long resCntr = cntr.getKey().incrementAndGet();

                assert resCntr <= cntr.getValue();

                e.updateCounter(resCntr);

                // For shared cache groups the group-wide partition counter must be advanced too.
                if (ctx0.group().sharedGroup()) {
                    ctx0.group().onPartitionCounterUpdate(cacheId, e.key().partition(), resCntr, tx.topologyVersion(), tx.local());
                }

                if (log.isDebugEnabled())
                    log.debug("Process cached entry:" + e);

                // DR: skip fake entries — they carry no real values to replicate.
                if (ctx0.isDrEnabled() && !fakeEntries) {
                    ctx0.dr().replicate(e.key(), e.value(), e.ttl(), e.expireTime(), e.version(), tx.local() ? DR_PRIMARY : DR_BACKUP, e.topologyVersion());
                }

                // CQ
                CacheContinuousQueryManager contQryMgr = ctx0.continuousQueries();

                if (ctx0.continuousQueries().notifyContinuousQueries(tx)) {
                    Map<UUID, CacheContinuousQueryListener> lsnrCol = continuousQueryListeners(ctx0, tx);

                    if (!F.isEmpty(lsnrCol)) {
                        contQryMgr.onEntryUpdated(lsnrCol, e.key(), // Force skip update counter if rolled back.
                            commit ? e.value() : null, // Force skip update counter if rolled back.
                            commit ? e.oldValue() : null, false, e.key().partition(), tx.local(), false, e.updateCounter(), null, e.topologyVersion());
                    }
                }
            }
        }
        finally {
            ctx0.group().listenerLock().readLock().unlock();
        }
    }
}
Use of org.apache.ignite.internal.processors.cache.mvcc.txlog.TxKey in the Apache Ignite project.
Class MvccCachingManager, method addEnlisted:
/**
 * Caches an enlisted transaction entry so it can later be replayed to continuous-query
 * listeners and data-center replication when the transaction finishes.
 *
 * @param key Key.
 * @param val Value.
 * @param ttl Time to live.
 * @param expireTime Expire time.
 * @param ver Version.
 * @param oldVal Old value.
 * @param primary Flag whether this is a primary node.
 * @param topVer Topology version.
 * @param mvccVer Mvcc version.
 * @param cacheId Cache id.
 * @param tx Transaction.
 * @param futId Dht future id.
 * @param batchNum Batch number (for batches reordering prevention).
 * @throws IgniteCheckedException If the transaction exceeds the caching size threshold.
 */
public void addEnlisted(KeyCacheObject key, @Nullable CacheObject val, long ttl, long expireTime, GridCacheVersion ver, CacheObject oldVal, boolean primary, AffinityTopologyVersion topVer, MvccVersion mvccVer, int cacheId, IgniteInternalTx tx, IgniteUuid futId, int batchNum) throws IgniteCheckedException {
    assert key != null;
    assert mvccVer != null;
    assert tx != null;

    if (log.isDebugEnabled()) {
        log.debug("Added entry to mvcc cache: [key=" + key + ", val=" + val + ", oldVal=" + oldVal + ", primary=" + primary + ", mvccVer=" + mvccVer + ", cacheId=" + cacheId + ", ver=" + ver + ']');
    }

    // Caching is pointless unless DR or CQ was enabled when this cache became active in the tx.
    if (!tx.txState().useMvccCaching(cacheId))
        return;

    // Per-transaction entry counter, keyed by the tx's mvcc coordinates.
    TxKey txKey = new TxKey(mvccVer.coordinatorVersion(), mvccVer.counter());

    AtomicInteger opsCntr = cntrs.computeIfAbsent(txKey, k -> new AtomicInteger());

    // Refuse to buffer unbounded transactions.
    if (opsCntr.incrementAndGet() > TX_SIZE_THRESHOLD) {
        throw new IgniteCheckedException("Transaction is too large. Consider reducing transaction size or " +
            "turning off continuous queries and datacenter replication [size=" + opsCntr.get() + ", txXid=" + ver + ']');
    }

    MvccTxEntry txEntry = new MvccTxEntry(key, val, ttl, expireTime, ver, oldVal, primary, topVer, mvccVer, cacheId);

    EnlistBuffer enlistBuf = enlistCache.computeIfAbsent(ver, k -> new EnlistBuffer());

    // Backups track future id / batch number to keep reordered batches in order; primaries don't need them.
    enlistBuf.add(primary ? null : futId, primary ? -1 : batchNum, txEntry);
}
Aggregations