Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridDhtCacheEntry, method addDhtLocal.
/**
* Add local candidate.
*
* @param nearNodeId Near node ID.
* @param nearVer Near version.
* @param topVer Topology version.
* @param threadId Owning thread ID.
* @param ver Lock version.
* @param serOrder Version for serializable transactions ordering.
* @param timeout Timeout to acquire lock.
* @param reenter Reentry flag.
* @param tx Tx flag.
* @param implicitSingle Implicit flag.
* @param read Read lock flag.
* @return New candidate.
* @throws GridCacheEntryRemovedException If entry has been removed.
* @throws GridDistributedLockCancelledException If lock was cancelled.
*/
@Nullable
GridCacheMvccCandidate addDhtLocal(
    UUID nearNodeId,
    GridCacheVersion nearVer,
    AffinityTopologyVersion topVer,
    long threadId,
    GridCacheVersion ver,
    @Nullable GridCacheVersion serOrder,
    long timeout,
    boolean reenter,
    boolean tx,
    boolean implicitSingle,
    boolean read
) throws GridCacheEntryRemovedException, GridDistributedLockCancelledException {
    assert !reenter || serOrder == null;

    GridCacheMvccCandidate cand;
    CacheLockCandidates prev;
    CacheLockCandidates owner;

    CacheObject val;

    lockEntry();

    try {
        // Check removed locks prior to obsolete flag.
        checkRemoved(ver);
        checkRemoved(nearVer);

        checkObsolete();

        GridCacheMvcc mvcc = mvccExtras();

        if (mvcc == null) {
            mvcc = new GridCacheMvcc(cctx);

            mvccExtras(mvcc);
        }

        prev = mvcc.allOwners();

        boolean emptyBefore = mvcc.isEmpty();

        cand = mvcc.addLocal(
            this,
            nearNodeId,
            nearVer,
            threadId,
            ver,
            timeout,
            serOrder,
            reenter,
            tx,
            implicitSingle,
            /*dht-local*/true,
            read
        );

        if (cand == null)
            return null;

        cand.topologyVersion(topVer);

        owner = mvcc.allOwners();

        if (owner != null)
            cand.ownerVersion(owner.candidate(0).version());

        boolean emptyAfter = mvcc.isEmpty();

        checkCallbacks(emptyBefore, emptyAfter);

        val = this.val;

        if (mvcc.isEmpty())
            mvccExtras(null);
    }
    finally {
        unlockEntry();
    }

    // Don't link reentries.
    if (!cand.reentry())
        // Link with other candidates in the same thread.
        cctx.mvcc().addNext(cctx, cand);

    checkOwnerChanged(prev, owner, val);

    return cand;
}
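A pattern worth noting in addDhtLocal: all per-entry MVCC mutation happens between lockEntry() and unlockEntry(), while cross-entry work (linking candidates in the same thread, owner-change notifications) is deliberately deferred until the entry lock is released. Below is a minimal, self-contained sketch of that discipline using only java.util.concurrent; the class and method names are illustrative stand-ins, not Ignite API.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.ReentrantLock;

public class LockQueueEntry {
    private final ReentrantLock entryLock = new ReentrantLock();

    /** Thread IDs queued for this entry; the head is the owner. */
    private final Deque<Long> candidates = new ArrayDeque<>();

    /** Adds a candidate under the entry lock; cross-entry callbacks run after unlock. */
    public boolean addCandidate(long threadId, Runnable ownerChangedCallback) {
        boolean becameOwner;

        entryLock.lock();

        try {
            // Per-entry state changes only while the entry lock is held.
            candidates.addLast(threadId);

            becameOwner = candidates.peekFirst() == threadId;
        }
        finally {
            entryLock.unlock();
        }

        // Work that may touch other entries runs outside the entry lock
        // to avoid lock-ordering deadlocks.
        if (becameOwner)
            ownerChangedCallback.run();

        return becameOwner;
    }

    public static void main(String[] args) {
        LockQueueEntry entry = new LockQueueEntry();

        entry.addCandidate(Thread.currentThread().getId(),
            () -> System.out.println("Owner changed."));
    }
}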
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridLocalLockFuture, method addEntry.
/**
* Adds entry to future.
*
* @param entry Entry to add.
* @return Lock candidate.
* @throws GridCacheEntryRemovedException If entry was removed.
*/
@Nullable
private GridCacheMvccCandidate addEntry(GridLocalCacheEntry entry)
    throws GridCacheEntryRemovedException {
    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate c = entry.addLocal(
        threadId,
        lockVer,
        null,
        null,
        timeout,
        !inTx(),
        inTx(),
        implicitSingle(),
        false
    );

    entries.add(entry);

    if (c == null && timeout < 0) {
        if (log.isDebugEnabled())
            log.debug("Failed to acquire lock with negative timeout: " + entry);

        onFailed();

        return null;
    }

    if (c != null)
        // Immediately set lock to ready.
        entry.readyLocal(c);

    return c;
}
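The c == null && timeout < 0 branch encodes fail-fast semantics: a negative timeout means the caller will not wait for the lock at all. A rough sketch of the same contract over a plain ReentrantLock, assuming the convention that a negative timeout fails immediately and zero means wait indefinitely (illustrative names, not Ignite API):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class FailFastLock {
    private final ReentrantLock lock = new ReentrantLock();

    /**
     * @param timeout Negative: fail immediately if unavailable; zero: wait
     *        indefinitely; positive: wait up to the given milliseconds.
     * @return Whether the lock was acquired.
     */
    public boolean acquire(long timeout) throws InterruptedException {
        if (timeout < 0)
            return lock.tryLock(); // Fail-fast attempt, no waiting.

        if (timeout == 0) {
            lock.lockInterruptibly(); // Wait without a deadline.

            return true;
        }

        return lock.tryLock(timeout, TimeUnit.MILLISECONDS);
    }
}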
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class IgniteTxManager, method txLocksInfo.
/**
* @param txKeys Tx keys.
* @return Transaction locks and nodes.
*/
private TxLocksResponse txLocksInfo(Collection<IgniteTxKey> txKeys) {
    TxLocksResponse res = new TxLocksResponse();

    Collection<IgniteInternalTx> txs = activeTransactions();

    for (IgniteInternalTx tx : txs) {
        boolean nearTxLoc = tx instanceof GridNearTxLocal;

        if (!(nearTxLoc || tx instanceof GridDhtTxLocal) || !hasKeys(tx, txKeys))
            continue;

        IgniteTxState state = tx.txState();

        assert state instanceof IgniteTxStateImpl || state instanceof IgniteTxImplicitSingleStateImpl;

        Collection<IgniteTxEntry> txEntries = state instanceof IgniteTxStateImpl
            ? ((IgniteTxStateImpl)state).allEntriesCopy()
            : state.allEntries();

        Set<IgniteTxKey> requestedKeys = null;

        // Collect the keys requested by near-local transactions
        // in order to reduce the number of requests to remote nodes.
        if (nearTxLoc) {
            if (tx.pessimistic()) {
                GridDhtColocatedLockFuture fut =
                    (GridDhtColocatedLockFuture)mvccFuture(tx, GridDhtColocatedLockFuture.class);

                if (fut != null)
                    requestedKeys = fut.requestedKeys();

                GridNearLockFuture nearFut = (GridNearLockFuture)mvccFuture(tx, GridNearLockFuture.class);

                if (nearFut != null) {
                    Set<IgniteTxKey> nearRequestedKeys = nearFut.requestedKeys();

                    if (nearRequestedKeys != null) {
                        if (requestedKeys == null)
                            requestedKeys = nearRequestedKeys;
                        else
                            requestedKeys.addAll(nearRequestedKeys);
                    }
                }
            }
            else {
                GridNearOptimisticTxPrepareFuture fut =
                    (GridNearOptimisticTxPrepareFuture)mvccFuture(tx, GridNearOptimisticTxPrepareFuture.class);

                if (fut != null)
                    requestedKeys = fut.requestedKeys();
            }
        }

        for (IgniteTxEntry txEntry : txEntries) {
            IgniteTxKey txKey = txEntry.txKey();

            if (res.txLocks(txKey) == null) {
                GridCacheMapEntry e = (GridCacheMapEntry)txEntry.cached();

                List<GridCacheMvccCandidate> locs = e.mvccAllLocal();

                if (locs != null) {
                    boolean owner = false;

                    for (GridCacheMvccCandidate loc : locs) {
                        if (!owner && loc.owner() && loc.tx())
                            owner = true;

                        if (!owner) // Skip all candidates in case when no tx owns the lock.
                            break;

                        if (loc.tx()) {
                            UUID nearNodeId = loc.otherNodeId();

                            GridCacheVersion txId = loc.otherVersion();

                            TxLock txLock = new TxLock(
                                txId == null ? loc.version() : txId,
                                nearNodeId == null ? loc.nodeId() : nearNodeId,
                                // We can get an outdated value of the thread ID, but it is only informational here.
                                loc.threadId(),
                                loc.owner() ? TxLock.OWNERSHIP_OWNER : TxLock.OWNERSHIP_CANDIDATE
                            );

                            res.addTxLock(txKey, txLock);
                        }
                    }
                }
                // Special case for optimal sequence of nodes processing.
                else if (nearTxLoc && requestedKeys != null && requestedKeys.contains(txKey)) {
                    TxLock txLock = new TxLock(
                        tx.nearXidVersion(),
                        tx.nodeId(),
                        tx.threadId(),
                        TxLock.OWNERSHIP_REQUESTED
                    );

                    res.addTxLock(txKey, txLock);
                }
                else
                    res.addKey(txKey);
            }
        }
    }

    return res;
}
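The inner loop over mvccAllLocal() classifies each local candidate as the lock owner or a waiter, and bails out early when the queue head is not owned by a transaction. The classification logic, condensed into a self-contained sketch (Candidate and Ownership below are illustrative stand-ins for GridCacheMvccCandidate and the TxLock ownership constants):

import java.util.List;

public class LockClassifier {
    enum Ownership { OWNER, CANDIDATE }

    /** Simplified stand-in for GridCacheMvccCandidate. */
    record Candidate(boolean owner, boolean tx, long threadId) { }

    /** Prints one line per transactional candidate; prints nothing if no tx owns the lock. */
    static void classify(List<Candidate> locs) {
        boolean owner = false;

        for (Candidate loc : locs) {
            if (!owner && loc.owner() && loc.tx())
                owner = true;

            if (!owner) // No owning tx at the queue head: skip all candidates.
                break;

            if (loc.tx())
                System.out.println("thread=" + loc.threadId() + " -> "
                    + (loc.owner() ? Ownership.OWNER : Ownership.CANDIDATE));
        }
    }

    public static void main(String[] args) {
        classify(List.of(new Candidate(true, true, 1), new Candidate(false, true, 2)));
    }
}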
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridCommandHandlerTest, method testKillHangingRemoteTransactions.
/**
* Simulates uncommitted backup transactions and tests rolling them back using the control utility.
*/
@Test
public void testKillHangingRemoteTransactions() throws Exception {
    final int cnt = 3;

    startGridsMultiThreaded(cnt);

    Ignite[] clients = new Ignite[] {
        startGrid("client1"),
        startGrid("client2"),
        startGrid("client3"),
        startGrid("client4")
    };

    clients[0].getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setBackups(2)
        .setAtomicityMode(TRANSACTIONAL)
        .setWriteSynchronizationMode(FULL_SYNC)
        .setAffinity(new RendezvousAffinityFunction(false, 64)));

    awaitPartitionMapExchange();

    for (Ignite client : clients) {
        assertTrue(client.configuration().isClientMode());

        assertNotNull(client.cache(DEFAULT_CACHE_NAME));
    }

    LongAdder progress = new LongAdder();

    AtomicInteger idx = new AtomicInteger();

    int tc = clients.length;

    CountDownLatch lockLatch = new CountDownLatch(1);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Ignite prim = primaryNode(0L, DEFAULT_CACHE_NAME);

    TestRecordingCommunicationSpi primSpi = TestRecordingCommunicationSpi.spi(prim);

    primSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message message) {
            return message instanceof GridDhtTxFinishRequest;
        }
    });

    Set<IgniteUuid> xidSet = new GridConcurrentHashSet<>();

    IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
        @Override public void run() {
            int id = idx.getAndIncrement();

            Ignite client = clients[id];

            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED, 0, 1)) {
                xidSet.add(tx.xid());

                IgniteCache<Long, Long> cache = client.cache(DEFAULT_CACHE_NAME);

                if (id != 0)
                    U.awaitQuiet(lockLatch);

                cache.invoke(0L, new IncrementClosure(), null);

                if (id == 0) {
                    lockLatch.countDown();

                    U.awaitQuiet(commitLatch);

                    // Wait until candidates are enqueued.
                    doSleep(500);
                }

                tx.commit();
            }
            catch (Exception e) {
                assertTrue(X.hasCause(e, TransactionTimeoutException.class));
            }

            progress.increment();
        }
    }, tc, "invoke-thread");

    U.awaitQuiet(lockLatch);

    commitLatch.countDown();

    primSpi.waitForBlocked(clients.length);

    // Unblock only finish messages from clients 2 through 4.
    primSpi.stopBlock(true, blockedMsg -> {
        GridIoMessage iom = blockedMsg.ioMessage();

        Message m = iom.message();

        if (m instanceof GridDhtTxFinishRequest) {
            GridDhtTxFinishRequest r = (GridDhtTxFinishRequest)m;

            return !r.nearNodeId().equals(clients[0].cluster().localNode().id());
        }

        return true;
    });

    // Wait until the lock queue is stable.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx)ignite).context().cache().context().tm().activeTransactions();

        waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                for (IgniteInternalTx tx : txs) {
                    if (!tx.local()) {
                        IgniteTxEntry entry = tx.writeEntries().iterator().next();

                        GridCacheEntryEx cached = entry.cached();

                        Collection<GridCacheMvccCandidate> candidates = cached.remoteMvccSnapshot();

                        if (candidates.size() != clients.length)
                            return false;
                    }
                }

                return true;
            }
        }, 10_000);
    }

    CommandHandler h = new CommandHandler();

    // Check listing.
    validate(h, map -> {
        for (int i = 0; i < cnt; i++) {
            IgniteEx grid = grid(i);

            // Skip primary.
            if (grid.localNode().id().equals(prim.cluster().localNode().id()))
                continue;

            VisorTxTaskResult res = map.get(grid.localNode());

            List<VisorTxInfo> infos = res.getInfos().stream()
                .filter(info -> xidSet.contains(info.getNearXid()))
                .collect(Collectors.toList());

            // Validate queue length on backups.
            assertEquals(clients.length, infos.size());
        }
    }, "--tx");

    // Check kill.
    validate(h, map -> {
        // No-op.
    }, "--tx", "--kill");

    // Wait for all remote txs to finish.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx)ignite).context().cache().context().tm().activeTransactions();

        for (IgniteInternalTx tx : txs) {
            if (!tx.local())
                tx.finishFuture().get();
        }
    }

    // Unblock finish message from client1.
    primSpi.stopBlock(true);

    fut.get();

    Long cur = (Long)clients[0].cache(DEFAULT_CACHE_NAME).get(0L);

    assertEquals(tc - 1, cur.longValue());

    checkUserFutures();
}
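The concurrency choreography of the test (one transaction takes the lock, the remaining ones queue behind it, and a second latch gates the commit) reduces to a small latch pattern. A self-contained sketch with standard java.util.concurrent latches, no Ignite test framework involved (all names illustrative):

import java.util.concurrent.CountDownLatch;

public class LatchChoreography {
    public static void main(String[] args) throws InterruptedException {
        int threads = 4;

        CountDownLatch lockLatch = new CountDownLatch(1);   // Released once the leader holds the lock.
        CountDownLatch commitLatch = new CountDownLatch(1); // Released when the leader may finish.

        Object lock = new Object();

        for (int i = 0; i < threads; i++) {
            int id = i;

            new Thread(() -> {
                try {
                    if (id != 0)
                        lockLatch.await(); // Followers start only after the leader owns the lock.

                    synchronized (lock) {
                        if (id == 0) {
                            lockLatch.countDown(); // Leader announces lock ownership...

                            commitLatch.await();   // ...and parks while followers enqueue behind it.
                        }

                        System.out.println("thread " + id + " passed the critical section");
                    }
                }
                catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, "invoke-thread-" + id).start();
        }

        lockLatch.await();       // Leader holds the lock and followers are queuing.
        commitLatch.countDown(); // Let the leader finish; followers then drain one by one.
    }
}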
Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.
The class GridDhtColocatedCache, method removeLocks.
/**
* Removes locks for the given version and keys, regardless of
* whether they are owned.
*
* @param threadId Thread ID.
* @param ver Lock version.
* @param keys Keys.
*/
public void removeLocks(long threadId, GridCacheVersion ver, Collection<KeyCacheObject> keys) {
    if (keys.isEmpty())
        return;

    try {
        int keyCnt = -1;

        Map<ClusterNode, GridNearUnlockRequest> map = null;

        Collection<KeyCacheObject> locKeys = new LinkedList<>();

        for (KeyCacheObject key : keys) {
            IgniteTxKey txKey = ctx.txKey(key);

            GridCacheMvccCandidate lock = ctx.mvcc().removeExplicitLock(threadId, txKey, ver);

            if (lock != null) {
                AffinityTopologyVersion topVer = lock.topologyVersion();

                if (map == null) {
                    Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, topVer);

                    keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                    map = U.newHashMap(affNodes.size());
                }

                ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);

                if (primary == null) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to remove locks (all partition nodes left the grid).");

                    continue;
                }

                if (!primary.isLocal()) {
                    // Send request to remove from remote nodes.
                    GridNearUnlockRequest req = map.get(primary);

                    if (req == null) {
                        map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));

                        req.version(ver);
                    }

                    GridCacheEntryEx entry = peekEx(key);

                    KeyCacheObject key0 = entry != null ? entry.key() : key;

                    req.addKey(key0, ctx);
                }
                else
                    locKeys.add(key);
            }
        }

        if (!locKeys.isEmpty())
            removeLocks(ctx.localNodeId(), ver, locKeys, true);

        if (map == null || map.isEmpty())
            return;

        IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);

        Collection<GridCacheVersion> committed = versPair.get1();
        Collection<GridCacheVersion> rolledback = versPair.get2();

        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();

            GridDistributedUnlockRequest req = mapping.getValue();

            if (!F.isEmpty(req.keys())) {
                req.completedVersions(committed, rolledback);

                try {
                    // We don't wait for reply to this message.
                    ctx.io().send(n, req, ctx.ioPolicy());
                }
                catch (ClusterTopologyCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to send unlock request (node has left the grid) [keys=" + req.keys() +
                            ", n=" + n + ", e=" + e + ']');
                }
                catch (IgniteCheckedException e) {
                    U.error(log, "Failed to send unlock request [keys=" + req.keys() + ", n=" + n + ']', e);
                }
            }
        }
    }
    catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
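The request batching in removeLocks is a generic fan-out pattern: lazily create one request per primary node and size it with the even-split estimate ceil(totalKeys / nodeCount). A self-contained sketch of just the grouping step, with strings standing in for cluster nodes and keys (illustrative, not Ignite API):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class UnlockBatcher {
    /** Groups keys by their primary node, sizing each batch with an even-split estimate. */
    static Map<String, List<String>> groupByPrimary(List<String> keys, int nodeCnt,
        Function<String, String> primaryOf) {
        // Estimated keys per node, used as the initial capacity of each batch.
        int keyCnt = (int)Math.ceil((double)keys.size() / nodeCnt);

        Map<String, List<String>> map = new HashMap<>(nodeCnt);

        for (String key : keys)
            map.computeIfAbsent(primaryOf.apply(key), n -> new ArrayList<>(keyCnt)).add(key);

        return map;
    }

    public static void main(String[] args) {
        List<String> keys = List.of("a", "b", "c", "d", "e");

        // Toy affinity: route keys by hash across two nodes.
        Map<String, List<String>> batches = groupByPrimary(keys, 2, k -> "node" + (k.hashCode() % 2));

        batches.forEach((node, batch) -> System.out.println(node + " -> " + batch));
    }
}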