Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
The class GridDhtColocatedCache, method lockAllAsync.
/**
 * This is an entry point to pessimistic locking within a transaction.
 *
 * {@inheritDoc}
 */
@Override public IgniteInternalFuture<Boolean> lockAllAsync(
    Collection<KeyCacheObject> keys,
    long timeout,
    @Nullable IgniteTxLocalEx tx,
    boolean isInvalidate,
    boolean isRead,
    boolean retval,
    @Nullable TransactionIsolation isolation,
    long createTtl,
    long accessTtl) {
assert tx == null || tx instanceof GridNearTxLocal : tx;
GridNearTxLocal txx = (GridNearTxLocal) tx;
CacheOperationContext opCtx = ctx.operationContextPerCall();
GridDhtColocatedLockFuture fut = new GridDhtColocatedLockFuture(ctx,
    keys,
    txx,
    isRead,
    retval,
    timeout,
    createTtl,
    accessTtl,
    CU.empty0(),
    opCtx != null && opCtx.skipStore(),
    opCtx != null && opCtx.isKeepBinary(),
    opCtx != null && opCtx.recovery());
// Future will be added to mvcc only if it was mapped to remote nodes.
fut.map();
return fut;
}
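The skipStore, keepBinary and recovery flags read from the per-call CacheOperationContext above are populated by the public with*() decorators on IgniteCache. A minimal sketch of how a caller would set them; the node startup and the cache name "myCache" are illustrative assumptions, not taken from the snippet:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

public class OperationContextSketch {
    public static void main(String[] args) {
        Ignite ignite = Ignition.start();
        IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");

        // Each decorator returns a facade whose per-call operation context
        // carries the corresponding flag; internals such as lockAllAsync()
        // read it back via ctx.operationContextPerCall().
        IgniteCache<Integer, String> noStore = cache.withSkipStore();        // opCtx.skipStore()
        IgniteCache<Integer, String> binary = cache.withKeepBinary();        // opCtx.isKeepBinary()
        IgniteCache<Integer, String> recover = cache.withPartitionRecover(); // opCtx.recovery()

        noStore.put(1, "value"); // this put bypasses the configured CacheStore
    }
}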
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
The class GridDhtColocatedCache, method getAllAsync.
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Map<K, V>> getAllAsync(
    @Nullable final Collection<? extends K> keys,
    boolean forcePrimary,
    boolean skipTx,
    String taskName,
    final boolean deserializeBinary,
    final boolean recovery,
    final ReadRepairStrategy readRepairStrategy,
    final boolean skipVals,
    final boolean needVer) {
ctx.checkSecurity(SecurityPermission.CACHE_READ);
if (F.isEmpty(keys))
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
warnIfUnordered(keys, BulkOperation.GET);
GridNearTxLocal tx = checkCurrentTx();
final CacheOperationContext opCtx = ctx.operationContextPerCall();
if (!ctx.mvccEnabled() && tx != null && !tx.implicit() && !skipTx) {
return asyncOp(tx, new AsyncOp<Map<K, V>>(keys) {
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Map<K, V>> op(GridNearTxLocal tx, AffinityTopologyVersion readyTopVer) {
    return tx.getAllAsync(ctx,
        readyTopVer,
        ctx.cacheKeysView(keys),
        deserializeBinary,
        skipVals,
        false,
        opCtx != null && opCtx.skipStore(),
        recovery,
        readRepairStrategy,
        needVer);
}
}, opCtx, /*retry*/false);
}
MvccSnapshot mvccSnapshot = null;
MvccQueryTracker mvccTracker = null;
if (ctx.mvccEnabled()) {
try {
if (tx != null)
mvccSnapshot = MvccUtils.requestSnapshot(tx);
else {
mvccTracker = MvccUtils.mvccTracker(ctx, null);
mvccSnapshot = mvccTracker.snapshot();
}
assert mvccSnapshot != null;
} catch (IgniteCheckedException ex) {
return new GridFinishedFuture<>(ex);
}
}
AffinityTopologyVersion topVer;
if (tx != null)
topVer = tx.topologyVersion();
else if (mvccTracker != null)
topVer = mvccTracker.topologyVersion();
else
topVer = ctx.affinity().affinityTopologyVersion();
if (readRepairStrategy != null) {
return new GridNearReadRepairCheckOnlyFuture(ctx,
    ctx.cacheKeysView(keys),
    readRepairStrategy,
    opCtx == null || !opCtx.skipStore(),
    taskName,
    deserializeBinary,
    recovery,
    skipVals ? null : expiryPolicy(opCtx != null ? opCtx.expiry() : null),
    skipVals,
    needVer,
    false,
    tx).multi();
}
IgniteInternalFuture<Map<K, V>> fut = loadAsync(ctx.cacheKeysView(keys),
    opCtx == null || !opCtx.skipStore(),
    forcePrimary,
    topVer,
    taskName,
    deserializeBinary,
    recovery,
    skipVals ? null : expiryPolicy(opCtx != null ? opCtx.expiry() : null),
    skipVals,
    needVer,
    false,
    null,
    mvccSnapshot);
if (mvccTracker != null) {
final MvccQueryTracker mvccTracker0 = mvccTracker;
fut.listen(new CI1<IgniteInternalFuture<Map<K, V>>>() {
/** {@inheritDoc} */
@Override public void apply(IgniteInternalFuture<Map<K, V>> future) {
if (future.isDone())
mvccTracker0.onDone();
}
});
}
return fut;
}
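The expiry policy that getAllAsync() resolves from opCtx.expiry() is likewise supplied per call, via IgniteCache.withExpiryPolicy(). A hedged sketch of a caller-side batch read that attaches one; the helper name readWithExpiry and the key/value types are illustrative:

import java.util.Map;
import java.util.Set;
import javax.cache.expiry.AccessedExpiryPolicy;
import javax.cache.expiry.Duration;
import org.apache.ignite.IgniteCache;

class ExpiryReadSketch {
    // Entries touched by this read get a one-minute access TTL;
    // internally the policy travels in the per-call CacheOperationContext.
    static Map<Integer, String> readWithExpiry(IgniteCache<Integer, String> cache, Set<Integer> keys) {
        return cache
            .withExpiryPolicy(AccessedExpiryPolicy.factoryOf(Duration.ONE_MINUTE).create())
            .getAll(keys);
    }
}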
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method lockAllAsync.
/**
* @param cacheCtx Cache context.
* @param nearNode Near node.
* @param req Request.
* @param filter0 Filter.
* @return Future.
*/
public IgniteInternalFuture<GridNearLockResponse> lockAllAsync(
    final GridCacheContext<?, ?> cacheCtx,
    final ClusterNode nearNode,
    final GridNearLockRequest req,
    @Nullable final CacheEntryPredicate[] filter0) {
final List<KeyCacheObject> keys = req.keys();
CacheEntryPredicate[] filter = filter0;
// Set message into thread context.
GridDhtTxLocal tx = null;
try {
int cnt = keys.size();
if (req.inTx()) {
GridCacheVersion dhtVer = ctx.tm().mappedVersion(req.version());
if (dhtVer != null)
tx = ctx.tm().tx(dhtVer);
}
final List<GridCacheEntryEx> entries = new ArrayList<>(cnt);
// Unmarshal filter first.
if (filter == null)
filter = req.filter();
GridDhtLockFuture fut = null;
GridDhtPartitionTopology top = null;
if (req.firstClientRequest()) {
assert nearNode.isClient();
top = topology();
top.readLock();
if (!top.topologyVersionFuture().isDone()) {
top.readUnlock();
return null;
}
}
try {
if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) {
if (log.isDebugEnabled()) {
log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
}
GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.lastTopologyChangeVersion());
return new GridFinishedFuture<>(res);
}
if (req.inTx()) {
if (tx == null) {
tx = new GridDhtTxLocal(ctx.shared(),
    topology().readyTopologyVersion(),
    nearNode.id(),
    req.version(),
    req.futureId(),
    req.miniId(),
    req.threadId(),
    /*implicitTx*/false,
    /*implicitSingleTx*/false,
    ctx.systemTx(),
    false,
    ctx.ioPolicy(),
    PESSIMISTIC,
    req.isolation(),
    req.timeout(),
    req.isInvalidate(),
    !req.skipStore(),
    false,
    req.txSize(),
    null,
    securitySubjectId(ctx),
    req.taskNameHash(),
    req.txLabel(),
    null);
if (req.syncCommit())
tx.syncMode(FULL_SYNC);
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !tx.init()) {
String msg = "Failed to acquire lock (transaction has been completed): " + req.version();
U.warn(log, msg);
if (tx != null)
tx.rollbackDhtLocal();
return new GridDhtFinishedFuture<>(new IgniteTxRollbackCheckedException(msg));
}
tx.topologyVersion(req.topologyVersion());
}
GridDhtPartitionsExchangeFuture lastFinishedFut = ctx.shared().exchange().lastFinishedFuture();
CacheOperationContext opCtx = ctx.operationContextPerCall();
CacheInvalidStateException validateCacheE = lastFinishedFut.validateCache(ctx, opCtx != null && opCtx.recovery(), req.txRead(), null, keys);
if (validateCacheE != null)
throw validateCacheE;
} else {
fut = new GridDhtLockFuture(ctx,
    nearNode.id(),
    req.version(),
    req.topologyVersion(),
    cnt,
    req.txRead(),
    req.needReturnValue(),
    req.timeout(),
    tx,
    req.threadId(),
    req.createTtl(),
    req.accessTtl(),
    filter,
    req.skipStore(),
    req.keepBinary());
// Add before mapping.
if (!ctx.mvcc().addFuture(fut))
throw new IllegalStateException("Duplicate future ID: " + fut);
}
} finally {
if (top != null)
top.readUnlock();
}
boolean timedOut = false;
for (KeyCacheObject key : keys) {
if (timedOut)
break;
while (true) {
// Specify topology version to make sure containment is checked
// based on the requested version, not the latest.
GridDhtCacheEntry entry = entryExx(key, req.topologyVersion());
try {
if (fut != null) {
// This method will add local candidate.
// Entry cannot become obsolete after this method succeeded.
fut.addEntry(key == null ? null : entry);
if (fut.isDone()) {
timedOut = true;
break;
}
}
entries.add(entry);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Got lock request for cancelled lock (will ignore): " + entry);
fut.onError(e);
return new GridDhtFinishedFuture<>(e);
}
}
}
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
ctx.tm().txContext(tx);
if (log.isDebugEnabled())
log.debug("Performing DHT lock [tx=" + tx + ", entries=" + entries + ']');
IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx,
    entries,
    req.messageId(),
    req.txRead(),
    req.needReturnValue(),
    req.createTtl(),
    req.accessTtl(),
    req.skipStore(),
    req.keepBinary(),
    req.nearCache());
final GridDhtTxLocal t = tx;
return new GridDhtEmbeddedFuture(txFut, new C2<GridCacheReturn, Exception, IgniteInternalFuture<GridNearLockResponse>>() {
@Override
public IgniteInternalFuture<GridNearLockResponse> apply(GridCacheReturn o, Exception e) {
if (e != null)
e = U.unwrap(e);
// Transaction can be emptied by asynchronous rollback.
assert e != null || !t.empty();
// Create response while holding locks.
final GridNearLockResponse resp = createLockReply(nearNode, entries, req, t, t.xidVersion(), e);
assert !t.implicit() : t;
assert !t.onePhaseCommit() : t;
sendLockReply(nearNode, t, req, resp);
return new GridFinishedFuture<>(resp);
}
});
} else {
assert fut != null;
// This will send remote messages.
fut.map();
final GridCacheVersion mappedVer = fut.version();
return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, GridNearLockResponse>() {
@Override
public GridNearLockResponse apply(Boolean b, Exception e) {
if (e != null)
e = U.unwrap(e);
else if (!b)
e = new GridCacheLockTimeoutException(req.version());
GridNearLockResponse res = createLockReply(nearNode, entries, req, null, mappedVer, e);
sendLockReply(nearNode, null, req, res);
return res;
}
}, fut);
}
} catch (IgniteCheckedException | RuntimeException e) {
U.error(log, req, e);
if (tx != null) {
try {
tx.rollbackDhtLocal();
} catch (IgniteCheckedException ex) {
U.error(log, "Failed to rollback the transaction: " + tx, ex);
}
}
try {
GridNearLockResponse res = createLockReply(nearNode, Collections.emptyList(), req, tx, tx != null ? tx.xidVersion() : req.version(), e);
sendLockReply(nearNode, null, req, res);
} catch (Exception ex) {
U.error(log, "Failed to send response for request message: " + req, ex);
}
return new GridDhtFinishedFuture<>(new IgniteCheckedException(e));
}
}
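This DHT-side path is what serves entry locks for user-level pessimistic transactions. A minimal caller-side sketch, reusing the illustrative ignite and cache instances from the first snippet and assuming the cache was created with TRANSACTIONAL atomicity:

import org.apache.ignite.transactions.Transaction;
import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

// In a PESSIMISTIC transaction every read/write acquires an entry lock;
// on the primary node the near lock request ends up in lockAllAsync() above.
try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
    String v = cache.get(1);      // sends a GridNearLockRequest for key 1
    cache.put(1, v + "-updated"); // key is already locked by the read
    tx.commit();                  // commit releases the acquired locks
}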
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
The class GridDistributedCacheAdapter, method removeAll.
/** {@inheritDoc} */
@Override public void removeAll() throws IgniteCheckedException {
try {
AffinityTopologyVersion topVer;
boolean retry;
CacheOperationContext opCtx = ctx.operationContextPerCall();
boolean skipStore = opCtx != null && opCtx.skipStore();
boolean keepBinary = opCtx != null && opCtx.isKeepBinary();
do {
retry = false;
topVer = ctx.affinity().affinityTopologyVersion();
// Send job to all data nodes.
Collection<ClusterNode> nodes = ctx.grid().cluster().forDataNodes(name()).nodes();
if (!nodes.isEmpty()) {
ctx.kernalContext().task().setThreadContext(TC_SUBGRID, nodes);
retry = !ctx.kernalContext().task().execute(
    new RemoveAllTask(ctx.name(), topVer, skipStore, keepBinary), null).get();
}
} while (ctx.affinity().affinityTopologyVersion().compareTo(topVer) != 0 || retry);
} catch (ClusterGroupEmptyCheckedException ignore) {
if (log.isDebugEnabled())
log.debug("All remote nodes left while cache remove [cacheName=" + name() + "]");
}
}
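Because removeAll() resolves skipStore from the per-call context, a caller can clear cache entries without propagating the removals to the configured CacheStore. A one-line sketch, again reusing the illustrative cache instance from the first snippet:

// Entries are removed from the cache only; the external store is untouched.
cache.withSkipStore().removeAll();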
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
The class GridLocalAtomicCache, method invokeAll.
/** {@inheritDoc} */
@Override public <T> Map<K, EntryProcessorResult<T>> invokeAll(
    Map<? extends K, ? extends EntryProcessor<K, V, T>> map,
    Object... args) throws IgniteCheckedException {
A.notNull(map, "map");
warnIfUnordered(map, BulkOperation.INVOKE);
final boolean statsEnabled = ctx.statisticsEnabled();
final long start = statsEnabled ? System.nanoTime() : 0L;
CacheOperationContext opCtx = ctx.operationContextPerCall();
Map<K, EntryProcessorResult<T>> entryProcessorResult = (Map<K, EntryProcessorResult<T>>)updateAllInternal(TRANSFORM,
    map.keySet(),
    map.values(),
    args,
    expiryPerCall(),
    false,
    false,
    null,
    ctx.writeThrough(),
    ctx.readThrough(),
    opCtx != null && opCtx.isKeepBinary());
if (statsEnabled)
metrics0().addInvokeTimeNanos(System.nanoTime() - start);
return entryProcessorResult;
}
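For reference, a hedged sketch of calling the matching Map-based invokeAll() overload that IgniteCache exposes publicly; the toUpper processor, the integer keys, and the cache instance are illustrative:

import java.util.HashMap;
import java.util.Map;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorResult;
import javax.cache.processor.MutableEntry;

// Uppercases the stored value in place and returns the previous value.
EntryProcessor<Integer, String, String> toUpper = new EntryProcessor<Integer, String, String>() {
    @Override public String process(MutableEntry<Integer, String> entry, Object... args) {
        String old = entry.getValue();
        if (old != null)
            entry.setValue(old.toUpperCase());
        return old;
    }
};

Map<Integer, EntryProcessor<Integer, String, String>> batch = new HashMap<>();
batch.put(1, toUpper);
batch.put(2, toUpper);

// Each key maps to its processor; results hold the per-key return values.
Map<Integer, EntryProcessorResult<String>> results = cache.invokeAll(batch);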