Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException in project ignite by apache.
From class GridDhtTransactionalCacheAdapter, method lockAllAsync:
/**
* @param cacheCtx Cache context.
* @param nearNode Near node.
* @param req Request.
* @param filter0 Filter.
* @return Future.
*/
public IgniteInternalFuture<GridNearLockResponse> lockAllAsync(final GridCacheContext<?, ?> cacheCtx, final ClusterNode nearNode, final GridNearLockRequest req, @Nullable final CacheEntryPredicate[] filter0) {
final List<KeyCacheObject> keys = req.keys();
CacheEntryPredicate[] filter = filter0;
// Set message into thread context.
GridDhtTxLocal tx = null;
try {
int cnt = keys.size();
if (req.inTx()) {
GridCacheVersion dhtVer = ctx.tm().mappedVersion(req.version());
if (dhtVer != null)
tx = ctx.tm().tx(dhtVer);
}
final List<GridCacheEntryEx> entries = new ArrayList<>(cnt);
// Unmarshal filter first.
if (filter == null)
filter = req.filter();
GridDhtLockFuture fut = null;
if (!req.inTx()) {
GridDhtPartitionTopology top = null;
if (req.firstClientRequest()) {
assert CU.clientNode(nearNode);
top = topology();
topology().readLock();
}
try {
if (top != null && needRemap(req.topologyVersion(), top.topologyVersion())) {
if (log.isDebugEnabled()) {
log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.topologyVersion() + ", req=" + req + ']');
}
GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.topologyVersion());
return new GridFinishedFuture<>(res);
}
fut = new GridDhtLockFuture(ctx, nearNode.id(), req.version(), req.topologyVersion(), cnt, req.txRead(), req.needReturnValue(), req.timeout(), tx, req.threadId(), req.createTtl(), req.accessTtl(), filter, req.skipStore(), req.keepBinary());
// Add before mapping.
if (!ctx.mvcc().addFuture(fut))
throw new IllegalStateException("Duplicate future ID: " + fut);
} finally {
if (top != null)
top.readUnlock();
}
}
boolean timedout = false;
for (KeyCacheObject key : keys) {
if (timedout)
break;
while (true) {
// Specify topology version to make sure containment is checked
// based on the requested version, not the latest.
GridDhtCacheEntry entry = entryExx(key, req.topologyVersion());
try {
if (fut != null) {
// This method will add local candidate.
// Entry cannot become obsolete after this method succeeded.
fut.addEntry(key == null ? null : entry);
if (fut.isDone()) {
timedout = true;
break;
}
}
entries.add(entry);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Got lock request for cancelled lock (will ignore): " + entry);
fut.onError(e);
return new GridDhtFinishedFuture<>(e);
}
}
}
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
if (tx == null) {
GridDhtPartitionTopology top = null;
if (req.firstClientRequest()) {
assert CU.clientNode(nearNode);
top = topology();
topology().readLock();
}
try {
if (top != null && needRemap(req.topologyVersion(), top.topologyVersion())) {
if (log.isDebugEnabled()) {
log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.topologyVersion() + ", req=" + req + ']');
}
GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.topologyVersion());
return new GridFinishedFuture<>(res);
}
tx = new GridDhtTxLocal(ctx.shared(), req.topologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), /*implicitTx*/false, /*implicitSingleTx*/false, ctx.systemTx(), false, ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.timeout(), req.isInvalidate(), !req.skipStore(), false, req.txSize(), null, req.subjectId(), req.taskNameHash());
if (req.syncCommit())
tx.syncMode(FULL_SYNC);
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !tx.init()) {
String msg = "Failed to acquire lock (transaction has been completed): " + req.version();
U.warn(log, msg);
if (tx != null)
tx.rollbackDhtLocal();
return new GridDhtFinishedFuture<>(new IgniteCheckedException(msg));
}
tx.topologyVersion(req.topologyVersion());
} finally {
if (top != null)
top.readUnlock();
}
}
ctx.tm().txContext(tx);
if (log.isDebugEnabled())
log.debug("Performing DHT lock [tx=" + tx + ", entries=" + entries + ']');
IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx, entries, req.messageId(), req.txRead(), req.needReturnValue(), req.createTtl(), req.accessTtl(), req.skipStore(), req.keepBinary());
final GridDhtTxLocal t = tx;
return new GridDhtEmbeddedFuture(txFut, new C2<GridCacheReturn, Exception, IgniteInternalFuture<GridNearLockResponse>>() {
@Override
public IgniteInternalFuture<GridNearLockResponse> apply(GridCacheReturn o, Exception e) {
if (e != null)
e = U.unwrap(e);
assert !t.empty();
// Create response while holding locks.
final GridNearLockResponse resp = createLockReply(nearNode, entries, req, t, t.xidVersion(), e);
assert !t.implicit() : t;
assert !t.onePhaseCommit() : t;
sendLockReply(nearNode, t, req, resp);
return new GridFinishedFuture<>(resp);
}
});
} else {
assert fut != null;
// This will send remote messages.
fut.map();
final GridCacheVersion mappedVer = fut.version();
return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, GridNearLockResponse>() {
@Override
public GridNearLockResponse apply(Boolean b, Exception e) {
if (e != null)
e = U.unwrap(e);
else if (!b)
e = new GridCacheLockTimeoutException(req.version());
GridNearLockResponse res = createLockReply(nearNode, entries, req, null, mappedVer, e);
sendLockReply(nearNode, null, req, res);
return res;
}
}, fut);
}
} catch (IgniteCheckedException | RuntimeException e) {
String err = "Failed to unmarshal at least one of the keys for lock request message: " + req;
U.error(log, err, e);
if (tx != null) {
try {
tx.rollbackDhtLocal();
} catch (IgniteCheckedException ex) {
U.error(log, "Failed to rollback the transaction: " + tx, ex);
}
}
return new GridDhtFinishedFuture<>(new IgniteCheckedException(err, e));
}
}
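The retry-or-fail pattern around addEntry recurs in every usage on this page: a GridCacheEntryRemovedException means the entry was concurrently removed and the loop simply fetches a fresh entry, while a GridDistributedLockCancelledException aborts the whole lock request. Below is a minimal, self-contained sketch of that loop; EntryRemovedException, LockCancelledException, Entry and LockRetryLoopSketch are hypothetical stand-ins for the Ignite internals, not the real API.

// Illustrative only: these types stand in for Ignite's GridCacheEntryRemovedException,
// GridDistributedLockCancelledException and GridDhtCacheEntry.
class EntryRemovedException extends Exception { }

class LockCancelledException extends Exception { }

interface Entry {
    // Adds a lock candidate; may fail because the entry was concurrently removed
    // or because the lock has already been cancelled.
    void addLockCandidate() throws EntryRemovedException, LockCancelledException;
}

class LockRetryLoopSketch {
    // Returns true if the candidate was added, false if the lock was cancelled.
    static boolean addWithRetry(java.util.function.Supplier<Entry> entryFactory) {
        while (true) {
            Entry entry = entryFactory.get(); // Re-read a fresh entry on every attempt.
            try {
                entry.addLockCandidate();
                return true;                  // Success: stop retrying.
            }
            catch (EntryRemovedException ignore) {
                // Entry vanished under us: loop and fetch a fresh entry.
            }
            catch (LockCancelledException e) {
                return false;                 // Cancelled lock: fail fast, no retry.
            }
        }
    }
}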
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException in project ignite by apache.
From class GridDhtColocatedCache, method lockAllAsync0:
/**
* @param cacheCtx Cache context.
* @param tx Started colocated transaction (if any).
* @param threadId Thread ID.
* @param ver Lock version.
* @param topVer Topology version.
* @param keys Mapped keys.
* @param txRead Tx read.
* @param retval Return value flag.
* @param timeout Lock timeout.
* @param createTtl TTL for create operation.
* @param accessTtl TTL for read operation.
* @param filter Optional filter.
* @param skipStore Skip store flag.
* @param keepBinary Keep binary flag.
* @return Lock future.
*/
private IgniteInternalFuture<Exception> lockAllAsync0(GridCacheContext<?, ?> cacheCtx, @Nullable final GridNearTxLocal tx, long threadId, final GridCacheVersion ver, AffinityTopologyVersion topVer, final Collection<KeyCacheObject> keys, final boolean txRead, boolean retval, final long timeout, final long createTtl, final long accessTtl, @Nullable final CacheEntryPredicate[] filter, boolean skipStore, boolean keepBinary) {
int cnt = keys.size();
if (tx == null) {
GridDhtLockFuture fut = new GridDhtLockFuture(ctx, ctx.localNodeId(), ver, topVer, cnt, txRead, retval, timeout, tx, threadId, createTtl, accessTtl, filter, skipStore, keepBinary);
// Add before mapping.
if (!ctx.mvcc().addFuture(fut))
throw new IllegalStateException("Duplicate future ID: " + fut);
boolean timedout = false;
for (KeyCacheObject key : keys) {
if (timedout)
break;
while (true) {
GridDhtCacheEntry entry = entryExx(key, topVer);
try {
fut.addEntry(key == null ? null : entry);
if (fut.isDone())
timedout = true;
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Failed to add entry [err=" + e + ", entry=" + entry + ']');
fut.onError(e);
return new GridDhtFinishedFuture<>(e);
}
}
}
// This will send remote messages.
fut.map();
return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, Exception>() {
@Override
public Exception apply(Boolean b, Exception e) {
if (e != null)
e = U.unwrap(e);
else if (!b)
e = new GridCacheLockTimeoutException(ver);
return e;
}
}, fut);
} else {
// Handle implicit locks for pessimistic transactions.
ctx.tm().txContext(tx);
if (log.isDebugEnabled())
log.debug("Performing colocated lock [tx=" + tx + ", keys=" + keys + ']');
IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx, keys, retval, txRead, createTtl, accessTtl, skipStore, keepBinary);
return new GridDhtEmbeddedFuture<>(new C2<GridCacheReturn, Exception, Exception>() {
@Override
public Exception apply(GridCacheReturn ret, Exception e) {
if (e != null)
e = U.unwrap(e);
assert !tx.empty();
return e;
}
}, txFut);
}
}
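The C2<Boolean, Exception, Exception> closure above collapses the lock future's (success flag, error) pair into a single outcome: propagate an existing error, map a clean but unsuccessful result to a timeout exception, or return null on success. The sketch below shows the same reduction with a plain java.util.function.BiFunction and a hypothetical LockTimeoutException standing in for GridCacheLockTimeoutException.

import java.util.function.BiFunction;

// Hypothetical stand-in for GridCacheLockTimeoutException.
class LockTimeoutException extends Exception {
    LockTimeoutException(String lockVersion) {
        super("Lock acquisition timed out for version: " + lockVersion);
    }
}

class LockOutcomeSketch {
    // Mirrors the shape of the C2<Boolean, Exception, Exception> closure: propagate an
    // existing error, turn an error-free but unsuccessful result into a timeout,
    // and return null when the lock was acquired.
    static BiFunction<Boolean, Exception, Exception> outcome(String lockVersion) {
        return (acquired, err) -> {
            if (err != null)
                return err;                                   // Propagate the original failure.
            if (!Boolean.TRUE.equals(acquired))
                return new LockTimeoutException(lockVersion); // Not acquired and no error: timeout.
            return null;                                      // Lock acquired successfully.
        };
    }
}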
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException in project ignite by apache.
From class IgniteTxManager, method lockMultiple:
/**
* @param tx Transaction.
* @param entries Entries to lock.
* @return {@code True} if all keys were locked.
* @throws IgniteCheckedException If lock has been cancelled.
*/
private boolean lockMultiple(IgniteInternalTx tx, Iterable<IgniteTxEntry> entries) throws IgniteCheckedException {
assert tx.optimistic() || !tx.local();
long remainingTime = tx.remainingTime();
// For serializable transactions, failure to acquire lock means
// that there is a serializable conflict. For all other isolation levels,
// we wait for the lock.
long timeout = remainingTime < 0 ? 0 : remainingTime;
GridCacheVersion serOrder = (tx.serializable() && tx.optimistic()) ? tx.nearXidVersion() : null;
for (IgniteTxEntry txEntry1 : entries) {
// Check if this entry was prepared before.
if (!txEntry1.markPrepared() || txEntry1.explicitVersion() != null)
continue;
GridCacheContext cacheCtx = txEntry1.context();
while (true) {
try {
GridCacheEntryEx entry1 = txEntry1.cached();
assert entry1 != null : txEntry1;
assert !entry1.detached() : "Expected non-detached entry for near transaction " + "[locNodeId=" + cctx.localNodeId() + ", entry=" + entry1 + ']';
GridCacheVersion serReadVer = txEntry1.entryReadVersion();
assert serReadVer == null || (tx.optimistic() && tx.serializable()) : txEntry1;
boolean read = serOrder != null && txEntry1.op() == READ;
entry1.unswap();
if (!entry1.tmLock(tx, timeout, serOrder, serReadVer, read)) {
// Unlock locks locked so far.
for (IgniteTxEntry txEntry2 : entries) {
if (txEntry2 == txEntry1)
break;
txEntry2.cached().txUnlock(tx);
}
return false;
}
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry in TM lockMultiple(..) method (will retry): " + txEntry1);
try {
// Renew cache entry.
txEntry1.cached(cacheCtx.cache().entryEx(txEntry1.key(), tx.topologyVersion()));
} catch (GridDhtInvalidPartitionException e) {
assert tx.dht() : "Received invalid partition for non DHT transaction [tx=" + tx + ", invalidPart=" + e.partition() + ']';
// If partition is invalid, we ignore this entry.
tx.addInvalidPartition(cacheCtx, e.partition());
break;
}
} catch (GridDistributedLockCancelledException ignore) {
tx.setRollbackOnly();
throw new IgniteCheckedException("Entry lock has been cancelled for transaction: " + tx);
}
}
}
return true;
}
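When tmLock fails for one entry, lockMultiple walks the same collection again and unlocks everything acquired before the failing entry, then returns false. A self-contained sketch of that partial-rollback idiom follows, using java.util.concurrent.locks.Lock rather than Ignite's transactional lock machinery.

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;

class MultiLockSketch {
    // Tries to lock every lock in order. If one acquisition times out, releases the
    // locks taken so far (same iteration order, stopping at the failed one) and
    // returns false, matching the "unlock locks locked so far" loop above.
    static boolean lockAll(List<Lock> locks, long timeoutMs) throws InterruptedException {
        for (Lock lock : locks) {
            if (!lock.tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
                for (Lock taken : locks) {
                    if (taken == lock)
                        break;        // Everything from this point on was never acquired.
                    taken.unlock();   // Release a lock that was successfully taken.
                }
                return false;
            }
        }
        return true;
    }
}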
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException in project ignite by apache.
From class GridDhtTransactionalCacheAdapter, method processDhtLockRequest0:
/**
* @param nodeId Node ID.
* @param req Request.
*/
private void processDhtLockRequest0(UUID nodeId, GridDhtLockRequest req) {
assert nodeId != null;
assert req != null;
assert !nodeId.equals(locNodeId);
int cnt = F.size(req.keys());
GridDhtLockResponse res;
GridDhtTxRemote dhtTx = null;
GridNearTxRemote nearTx = null;
boolean fail = false;
boolean cancelled = false;
try {
res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), cnt, ctx.deploymentEnabled());
dhtTx = startRemoteTx(nodeId, req, res);
nearTx = isNearEnabled(cacheCfg) ? near().startRemoteTx(nodeId, req) : null;
if (nearTx != null && !nearTx.empty())
res.nearEvicted(nearTx.evicted());
else {
if (!F.isEmpty(req.nearKeys())) {
Collection<IgniteTxKey> nearEvicted = new ArrayList<>(req.nearKeys().size());
nearEvicted.addAll(F.viewReadOnly(req.nearKeys(), new C1<KeyCacheObject, IgniteTxKey>() {
@Override
public IgniteTxKey apply(KeyCacheObject k) {
return ctx.txKey(k);
}
}));
res.nearEvicted(nearEvicted);
}
}
} catch (IgniteTxRollbackCheckedException e) {
String err = "Failed processing DHT lock request (transaction has been completed): " + req;
U.error(log, err, e);
res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), new IgniteTxRollbackCheckedException(err, e), ctx.deploymentEnabled());
fail = true;
} catch (IgniteCheckedException e) {
String err = "Failed processing DHT lock request: " + req;
U.error(log, err, e);
res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), new IgniteCheckedException(err, e), ctx.deploymentEnabled());
fail = true;
} catch (GridDistributedLockCancelledException ignored) {
// Received lock request for cancelled lock.
if (log.isDebugEnabled())
log.debug("Received lock request for canceled lock (will ignore): " + req);
res = null;
fail = true;
cancelled = true;
}
boolean releaseAll = false;
if (res != null) {
try {
// Reply back to sender.
ctx.io().send(nodeId, res, ctx.ioPolicy());
if (txLockMsgLog.isDebugEnabled()) {
txLockMsgLog.debug("Sent dht lock response [txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
U.warn(txLockMsgLog, "Failed to send dht lock response, node failed [" + "txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']');
fail = true;
releaseAll = true;
} catch (IgniteCheckedException e) {
U.error(txLockMsgLog, "Failed to send dht lock response (lock will not be acquired) [" + "txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']', e);
fail = true;
}
}
if (fail) {
if (dhtTx != null)
dhtTx.rollbackRemoteTx();
// Even though this should never happen, we leave this check for consistency.
if (nearTx != null)
nearTx.rollbackRemoteTx();
List<KeyCacheObject> keys = req.keys();
if (keys != null) {
for (KeyCacheObject key : keys) {
while (true) {
GridDistributedCacheEntry entry = peekExx(key);
try {
if (entry != null) {
// Release all locks because sender node left grid.
if (releaseAll)
entry.removeExplicitNodeLocks(req.nodeId());
else
entry.removeLock(req.version());
}
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Attempted to remove lock on removed entity during during failure " + "handling for dht lock request (will retry): " + entry);
}
}
}
}
if (releaseAll && !cancelled)
U.warn(log, "Sender node left grid in the midst of lock acquisition (locks have been released).");
}
}
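processDhtLockRequest0 distinguishes three failure modes: a transaction rollback or a generic error still produces a response that carries the exception back to the sender, while a cancelled lock produces no response at all and is handled purely by local cleanup. The sketch below mirrors that classification with hypothetical LockRequest, LockResponse and exception types; it is not the Ignite code path itself.

// Hypothetical request/response and exception types, used only to sketch how
// processDhtLockRequest0 classifies failures.
class LockRequest {
    final String id;
    LockRequest(String id) { this.id = id; }
}

class LockResponse {
    final String reqId;
    final Exception err;
    LockResponse(String reqId, Exception err) { this.reqId = reqId; this.err = err; }
}

class TxRollbackException extends Exception { }

class CancelledLockException extends Exception { }

class DhtLockRequestSketch {
    interface LockHandler {
        LockResponse handle(LockRequest req) throws Exception;
    }

    // Rollback and generic failures still produce a response that carries the error
    // back to the sender; a cancelled lock produces no response and is left to local
    // cleanup, mirroring the catch blocks in the snippet above.
    static LockResponse process(LockRequest req, LockHandler handler) {
        try {
            return handler.handle(req);         // Happy path: response without an error.
        }
        catch (TxRollbackException e) {
            return new LockResponse(req.id, e); // Report the rollback to the sender.
        }
        catch (CancelledLockException e) {
            return null;                        // Cancelled lock: no reply, cleanup only.
        }
        catch (Exception e) {
            return new LockResponse(req.id, e); // Any other failure is also reported.
        }
    }
}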
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException in project ignite by apache.
From class GridDhtTransactionalCacheAdapter, method lockAllAsyncInternal:
/**
* Acquires locks in partitioned cache.
*
* @param keys Keys to lock.
* @param timeout Lock timeout.
* @param txx Transaction.
* @param isInvalidate Invalidate flag.
* @param isRead Read flag.
* @param retval Return value flag.
* @param isolation Transaction isolation.
* @param createTtl TTL for create operation.
* @param accessTtl TTL for read operation.
* @param filter Optional filter.
* @param skipStore Skip store flag.
* @param keepBinary Keep binary flag.
* @return Lock future.
*/
public GridDhtFuture<Boolean> lockAllAsyncInternal(@Nullable Collection<KeyCacheObject> keys, long timeout, IgniteTxLocalEx txx, boolean isInvalidate, boolean isRead, boolean retval, TransactionIsolation isolation, long createTtl, long accessTtl, CacheEntryPredicate[] filter, boolean skipStore, boolean keepBinary) {
if (keys == null || keys.isEmpty())
return new GridDhtFinishedFuture<>(true);
GridDhtTxLocalAdapter tx = (GridDhtTxLocalAdapter) txx;
assert tx != null;
GridDhtLockFuture fut = new GridDhtLockFuture(ctx, tx.nearNodeId(), tx.nearXidVersion(), tx.topologyVersion(), keys.size(), isRead, retval, timeout, tx, tx.threadId(), createTtl, accessTtl, filter, skipStore, keepBinary);
for (KeyCacheObject key : keys) {
try {
while (true) {
GridDhtCacheEntry entry = entryExx(key, tx.topologyVersion());
try {
fut.addEntry(entry);
// Possible in case of cancellation or time out.
if (fut.isDone())
return fut;
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Failed to add entry [err=" + e + ", entry=" + entry + ']');
return new GridDhtFinishedFuture<>(e);
}
}
} catch (GridDhtInvalidPartitionException e) {
fut.addInvalidPartition(ctx, e.partition());
if (log.isDebugEnabled())
log.debug("Added invalid partition to DHT lock future [part=" + e.partition() + ", fut=" + fut + ']');
}
}
ctx.mvcc().addFuture(fut);
fut.map();
return fut;
}
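lockAllAsyncInternal does not fail the whole request when a key maps to a partition this node no longer owns; the GridDhtInvalidPartitionException branch records the partition on the future as invalid and moves on to the next key. Here is a hedged sketch of that split, with a made-up hash-based affinity function standing in for Ignite's real affinity.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.IntPredicate;

class InvalidPartitionSketch {
    // Result of scanning the keys: those that can be locked locally and the
    // partitions that were skipped as invalid.
    static final class Split {
        final List<String> lockable = new ArrayList<>();
        final List<Integer> invalidParts = new ArrayList<>();
    }

    // Mirrors the GridDhtInvalidPartitionException branch above: a key whose partition
    // is not owned locally does not fail the request; its partition is recorded as
    // invalid and the key is skipped. The hash-based mapping is a simplification.
    static Split splitByPartition(Collection<String> keys, int partitions, IntPredicate ownedLocally) {
        Split res = new Split();
        for (String key : keys) {
            int part = Math.floorMod(key.hashCode(), partitions);
            if (ownedLocally.test(part))
                res.lockable.add(key);
            else
                res.invalidParts.add(part); // Record the invalid partition and keep going.
        }
        return res;
    }
}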