use of org.apache.ignite.internal.processors.cache.GridCacheReturn in project ignite by apache.
the class GridNearTxLocal method optimisticPutFuture.
/**
* @param cacheCtx Cache context.
* @param loadFut Missing keys load future.
* @param ret Future result.
* @param keepBinary Keep binary flag.
* @return Future.
*/
private IgniteInternalFuture optimisticPutFuture(final GridCacheContext cacheCtx, IgniteInternalFuture<Void> loadFut, final GridCacheReturn ret, final boolean keepBinary) {
if (implicit()) {
// For an implicit transaction missing values are not loaded here: they are returned
// with the prepare response, if required.
assert loadFut.isDone();
try {
loadFut.get();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
return nonInterruptable(commitNearTxLocalAsync().chain(new CX1<IgniteInternalFuture<IgniteInternalTx>, GridCacheReturn>() {
@Override
public GridCacheReturn applyx(IgniteInternalFuture<IgniteInternalTx> txFut) throws IgniteCheckedException {
try {
txFut.get();
Object res = implicitRes.value();
if (implicitRes.invokeResult()) {
assert res == null || res instanceof Map : implicitRes;
res = cacheCtx.unwrapInvokeResult((Map) res, keepBinary);
}
return new GridCacheReturn(cacheCtx, true, keepBinary, res, implicitRes.success());
} catch (IgniteCheckedException | RuntimeException e) {
if (!(e instanceof NodeStoppingException))
rollbackNearTxLocalAsync();
throw e;
}
}
}));
} else {
return nonInterruptable(loadFut.chain(new CX1<IgniteInternalFuture<Void>, GridCacheReturn>() {
@Override
public GridCacheReturn applyx(IgniteInternalFuture<Void> f) throws IgniteCheckedException {
f.get();
return ret;
}
}));
}
}
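A minimal, illustrative sketch (not part of the Ignite source above) of a public call that can end up on this near-transaction put path and therefore needs a GridCacheReturn carried back to the caller. The cache name "myCache", the class name and the assumption that a single local node is started with default configuration are made up for the example; only the public IgniteCache API calls are real.
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.lang.IgniteFuture;

public class OptimisticPutSketch {
    public static void main(String[] args) {
        // Assumed: a single local node with default configuration.
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("myCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            // getAndPutAsync asks for the previous value, so the implicit transaction
            // has to hand a return value (internally a GridCacheReturn) back to the caller.
            IgniteFuture<String> fut = cache.getAndPutAsync(1, "value");

            // Blocks until the implicit transaction commits and the result is available.
            System.out.println("Previous value: " + fut.get());
        }
    }
}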
use of org.apache.ignite.internal.processors.cache.GridCacheReturn in project ignite by apache.
the class GridNearTxLocal method removeAllAsync0.
/**
* @param cacheCtx Cache context.
* @param entryTopVer Topology version for enlisted entries (can be {@code null}).
* @param keys Keys to remove.
* @param drMap DR map.
* @param retval Flag indicating whether a value should be returned.
* @param filter Filter.
* @param singleRmv {@code True} for a single key remove operation ({@link Cache#remove(Object)}).
* @return Future for asynchronous remove.
*/
@SuppressWarnings("unchecked")
private <K, V> IgniteInternalFuture<GridCacheReturn> removeAllAsync0(final GridCacheContext cacheCtx, @Nullable AffinityTopologyVersion entryTopVer, @Nullable final Collection<? extends K> keys, @Nullable Map<KeyCacheObject, GridCacheVersion> drMap, final boolean retval, @Nullable final CacheEntryPredicate filter, boolean singleRmv) {
try {
checkUpdatesAllowed(cacheCtx);
} catch (IgniteCheckedException e) {
return new GridFinishedFuture(e);
}
cacheCtx.checkSecurity(SecurityPermission.CACHE_REMOVE);
if (retval)
needReturnValue(true);
final Collection<?> keys0;
if (drMap != null) {
assert keys == null;
keys0 = drMap.keySet();
} else
keys0 = keys;
CacheOperationContext opCtx = cacheCtx.operationContextPerCall();
final Byte dataCenterId;
if (opCtx != null && opCtx.hasDataCenterId()) {
assert drMap == null : drMap;
dataCenterId = opCtx.dataCenterId();
} else
dataCenterId = null;
assert keys0 != null;
if (log.isDebugEnabled())
log.debug(S.toString("Called removeAllAsync(...)", "tx", this, false, "keys", keys0, true, "implicit", implicit, false, "retval", retval, false));
try {
checkValid();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
final GridCacheReturn ret = new GridCacheReturn(localResult(), false);
if (F.isEmpty(keys0)) {
if (implicit()) {
try {
commit();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
}
return new GridFinishedFuture<>(ret.success(true));
}
init();
final Collection<KeyCacheObject> enlisted = new ArrayList<>();
ExpiryPolicy plc;
final CacheEntryPredicate[] filters = CU.filterArray(filter);
if (!F.isEmpty(filters))
plc = opCtx != null ? opCtx.expiry() : null;
else
plc = null;
final boolean keepBinary = opCtx != null && opCtx.isKeepBinary();
final IgniteInternalFuture<Void> loadFut = enlistWrite(cacheCtx, entryTopVer, keys0, plc,
    /*lookup map*/null, /*invoke map*/null, /*invoke arguments*/null, retval, /*lock only*/false,
    filters, ret, enlisted, null, drMap, opCtx != null && opCtx.skipStore(), singleRmv, keepBinary,
    opCtx != null && opCtx.recovery(), dataCenterId);
if (log.isDebugEnabled())
log.debug("Remove keys: " + enlisted);
// Acquire locks only after the operation has been added to the write set; otherwise,
// during rollback we would not know whether the locks need to be rolled back.
if (pessimistic()) {
assert loadFut == null || loadFut.isDone() : loadFut;
if (loadFut != null) {
try {
loadFut.get();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
}
if (log.isDebugEnabled())
log.debug("Before acquiring transaction lock for remove on keys: " + enlisted);
long timeout = remainingTime();
if (timeout == -1)
return new GridFinishedFuture<>(timeoutException());
IgniteInternalFuture<Boolean> fut = cacheCtx.cache().txLockAsync(enlisted, timeout, this, false, retval, isolation, isInvalidate(), -1L, -1L);
PLC1<GridCacheReturn> plc1 = new PLC1<GridCacheReturn>(ret) {
@Override
protected GridCacheReturn postLock(GridCacheReturn ret) throws IgniteCheckedException {
if (log.isDebugEnabled())
log.debug("Acquired transaction lock for remove on keys: " + enlisted);
postLockWrite(cacheCtx, enlisted, ret, /*remove*/true, retval, /*read*/false, -1L, filters,
    /*computeInvoke*/false);
return ret;
}
};
if (fut.isDone()) {
try {
return nonInterruptable(plc1.apply(fut.get(), null));
} catch (GridClosureException e) {
return new GridFinishedFuture<>(e.unwrap());
} catch (IgniteCheckedException e) {
try {
return nonInterruptable(plc1.apply(false, e));
} catch (Exception e1) {
return new GridFinishedFuture<>(e1);
}
}
} else
return nonInterruptable(new GridEmbeddedFuture<>(fut, plc1));
} else {
if (implicit()) {
// For an implicit transaction values are returned with the prepare response, if required.
assert loadFut.isDone();
return nonInterruptable(commitNearTxLocalAsync().chain(new CX1<IgniteInternalFuture<IgniteInternalTx>, GridCacheReturn>() {
@Override
public GridCacheReturn applyx(IgniteInternalFuture<IgniteInternalTx> txFut) throws IgniteCheckedException {
try {
txFut.get();
return new GridCacheReturn(cacheCtx, true, keepBinary, implicitRes.value(), implicitRes.success());
} catch (IgniteCheckedException | RuntimeException e) {
rollbackNearTxLocalAsync();
throw e;
}
}
}));
} else {
return nonInterruptable(loadFut.chain(new CX1<IgniteInternalFuture<Void>, GridCacheReturn>() {
@Override
public GridCacheReturn applyx(IgniteInternalFuture<Void> f) throws IgniteCheckedException {
f.get();
return ret;
}
}));
}
}
}
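For context, a hedged usage sketch of the public remove calls this internal method backs: remove(K) corresponds to the singleRmv case where only a success flag is needed, while getAndRemove(K) is the retval case where the old value is carried back in the transaction's return value. The cache name "txCache", the class name and the local node start are assumptions for the example.
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class RemoveReturnValueSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("txCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, "one");

            // Single-key remove: only a success flag travels back (the singleRmv case).
            boolean removed = cache.remove(1);

            cache.put(2, "two");

            // Remove with return value: the previous value travels back (the retval case).
            String prev = cache.getAndRemove(2);

            System.out.println("removed=" + removed + ", prev=" + prev);
        }
    }
}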
use of org.apache.ignite.internal.processors.cache.GridCacheReturn in project ignite by apache.
the class GridNearTxFinishFuture method checkBackup.
/**
*
*/
private void checkBackup() {
assert !hasFutures() : futures();
GridDistributedTxMapping mapping = mappings.singleMapping();
if (mapping != null) {
UUID nodeId = mapping.primary().id();
Collection<UUID> backups = tx.transactionNodes().get(nodeId);
if (!F.isEmpty(backups)) {
assert backups.size() == 1;
UUID backupId = F.first(backups);
ClusterNode backup = cctx.discovery().node(backupId);
// Nothing to do if backup has left the grid.
if (backup == null) {
readyNearMappingFromBackup(mapping);
ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException("Backup node left grid: " + backupId);
cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
onDone(new IgniteTxRollbackCheckedException("Failed to commit transaction " + "(backup has left grid): " + tx.xidVersion(), cause));
} else {
final CheckBackupMiniFuture mini = new CheckBackupMiniFuture(1, backup, mapping);
add(mini);
if (backup.isLocal()) {
boolean committed = !cctx.tm().addRolledbackTx(tx);
readyNearMappingFromBackup(mapping);
if (committed) {
try {
if (tx.needReturnValue() && tx.implicit()) {
GridCacheReturnCompletableWrapper wrapper = cctx.tm().getCommittedTxReturn(tx.xidVersion());
assert wrapper != null : tx.xidVersion();
GridCacheReturn retVal = wrapper.fut().get();
assert retVal != null;
tx.implicitSingleResult(retVal);
}
if (tx.syncMode() == FULL_SYNC) {
GridCacheVersion nearXidVer = tx.nearXidVersion();
assert nearXidVer != null : tx;
IgniteInternalFuture<?> fut = cctx.tm().remoteTxFinishFuture(nearXidVer);
fut.listen(new CI1<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> fut) {
mini.onDone(tx);
}
});
return;
}
mini.onDone(tx);
} catch (IgniteCheckedException e) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("Near finish fut, failed to finish [" + "txId=" + tx.nearXidVersion() + ", node=" + backup.id() + ", err=" + e + ']');
}
mini.onDone(e);
}
} else {
ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException("Primary node left grid: " + nodeId);
cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
mini.onDone(new IgniteTxRollbackCheckedException("Failed to commit transaction " + "(transaction has been rolled back on backup node): " + tx.xidVersion(), cause));
}
} else {
GridDhtTxFinishRequest finishReq = checkCommittedRequest(mini.futureId(), false);
try {
cctx.io().send(backup, finishReq, tx.ioPolicy());
if (msgLog.isDebugEnabled()) {
msgLog.debug("Near finish fut, sent check committed request [" + "txId=" + tx.nearXidVersion() + ", node=" + backup.id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
mini.onNodeLeft(backupId, false);
} catch (IgniteCheckedException e) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("Near finish fut, failed to send check committed request [" + "txId=" + tx.nearXidVersion() + ", node=" + backup.id() + ", err=" + e + ']');
}
mini.onDone(e);
}
}
}
} else
readyNearMappingFromBackup(mapping);
}
}
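The FULL_SYNC branch above only applies when the cache is configured with full write synchronization. A small configuration sketch, with illustrative cache and class names, showing how such a transactional cache with a single backup might be set up:
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class FullSyncConfigSketch {
    public static void main(String[] args) {
        CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("txCache");

        ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
        // One backup per partition, matching the single-backup case handled above.
        ccfg.setBackups(1);
        // With FULL_SYNC the near finish future also waits for the backup's remote
        // finish future (the tx.syncMode() == FULL_SYNC branch in checkBackup).
        ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);

        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache(ccfg);
        }
    }
}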
use of org.apache.ignite.internal.processors.cache.GridCacheReturn in project ignite by apache.
the class GridDhtAtomicCache method updateAllAsyncInternal0.
/**
* Executes a local update after the preloader has fetched values.
*
* @param nodeId Node ID.
* @param req Update request.
* @param completionCb Completion callback.
*/
private void updateAllAsyncInternal0(UUID nodeId, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
ClusterNode node = ctx.discovery().node(nodeId);
if (node == null) {
U.warn(msgLog, "Skip near update request, node originated update request left [" + "futId=" + req.futureId() + ", node=" + nodeId + ']');
return;
}
GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), nodeId, req.futureId(), req.partition(), false, ctx.deploymentEnabled());
assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
GridDhtAtomicAbstractUpdateFuture dhtFut = null;
boolean remap = false;
String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
IgniteCacheExpiryPolicy expiry = null;
try {
// If batch store update is enabled, we need to lock all entries.
// First, need to acquire locks on cache entries, then check filter.
List<GridDhtCacheEntry> locked = null;
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
try {
GridDhtPartitionTopology top = topology();
top.readLock();
try {
if (top.stopping()) {
res.addFailedKeys(req.keys(), new IgniteCheckedException("Failed to perform cache operation " + "(cache is stopped): " + name()));
completionCb.apply(req, res);
return;
}
// Do not check the topology version if it was locked on the near node by an
// external transaction or explicit lock.
if (req.topologyLocked() || !needRemap(req.topologyVersion(), top.topologyVersion())) {
ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy());
locked = lockEntries(req, req.topologyVersion());
boolean hasNear = ctx.discovery().cacheNearNode(node, name());
// Assign next version for update inside entries lock.
GridCacheVersion ver = ctx.versions().next(top.topologyVersion());
if (hasNear)
res.nearVersion(ver);
if (msgLog.isDebugEnabled()) {
msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');
}
assert ver != null : "Got null version for update request: " + req;
boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());
dhtFut = createDhtFuture(ver, req);
expiry = expiryPolicy(req.expiry());
GridCacheReturn retVal = null;
// Several keys, store is enabled, not a local store (conflict resolver should be
// used for local store), and no DR.
if (req.size() > 1 && writeThrough() && !req.skipStore() && !ctx.store().isLocal() &&
    !ctx.dr().receiveEnabled()) {
// This method can only be used when there are no replicated entries in the batch.
UpdateBatchResult updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
deleted = updRes.deleted();
dhtFut = updRes.dhtFuture();
if (req.operation() == TRANSFORM)
retVal = updRes.invokeResults();
} else {
UpdateSingleResult updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
retVal = updRes.returnValue();
deleted = updRes.deleted();
dhtFut = updRes.dhtFuture();
}
if (retVal == null)
retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);
res.returnValue(retVal);
if (dhtFut != null) {
// To avoid deadlock, disable back-pressure for the sender data node.
if (req.writeSynchronizationMode() == PRIMARY_SYNC &&
    !ctx.discovery().cacheAffinityNode(ctx.discovery().node(nodeId), ctx.name()) && !dhtFut.isDone()) {
final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();
if (tracker != null && tracker instanceof GridNioMessageTracker) {
((GridNioMessageTracker) tracker).onMessageReceived();
dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
@Override
public void apply(IgniteInternalFuture<Void> fut) {
((GridNioMessageTracker) tracker).onMessageProcessed();
}
});
}
}
ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
}
} else {
// Should remap all keys.
remap = true;
res.remapTopologyVersion(top.topologyVersion());
}
} finally {
top.readUnlock();
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry should not become obsolete while holding lock.";
e.printStackTrace();
} finally {
if (locked != null)
unlockEntries(locked, req.topologyVersion());
// Enqueue if necessary after locks release.
if (deleted != null) {
assert !deleted.isEmpty();
assert ctx.deferredDelete() : this;
for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
    ctx.onDeferredDelete(e.get1(), e.get2());
}
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().fsync(null);
}
} catch (GridDhtInvalidPartitionException ignore) {
if (log.isDebugEnabled())
log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
remap = true;
res.remapTopologyVersion(ctx.topology().topologyVersion());
} catch (Throwable e) {
// At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
// an attempt to use cleaned resources.
U.error(log, "Unexpected exception during cache update", e);
res.addFailedKeys(req.keys(), e);
completionCb.apply(req, res);
if (e instanceof Error)
throw (Error) e;
return;
}
if (remap) {
assert dhtFut == null;
completionCb.apply(req, res);
} else if (dhtFut != null)
dhtFut.map(node, res.returnValue(), res, completionCb);
if (req.writeSynchronizationMode() != FULL_ASYNC)
req.cleanup(!node.isLocal());
sendTtlUpdateRequest(expiry);
}
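As a rough illustration of when this update path has to produce a non-trivial GridCacheReturn, here is a sketch of an atomic cache update that requests the previous value; the cache name, class name and the single local node are assumptions for the example.
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class AtomicUpdateSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("atomicCache");
            ccfg.setAtomicityMode(CacheAtomicityMode.ATOMIC);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, "first");

            // getAndPut needs the previous value, so the primary node's update response
            // carries a return value (internally a GridCacheReturn) back to this node.
            String prev = cache.getAndPut(1, "second");

            System.out.println("Previous value: " + prev);
        }
    }
}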
use of org.apache.ignite.internal.processors.cache.GridCacheReturn in project ignite by apache.
the class GridDhtAtomicCache method updateSingle.
/**
* Updates locked entries one-by-one.
*
* @param nearNode Originating node.
* @param hasNear {@code True} if originating node has near cache.
* @param req Update request.
* @param res Update response.
* @param locked Locked entries.
* @param ver Assigned update version.
* @param dhtFut Optional DHT future.
* @param replicate Whether DR is enabled for that cache.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return Return value.
* @throws GridCacheEntryRemovedException Should never be thrown.
*/
private UpdateSingleResult updateSingle(ClusterNode nearNode, boolean hasNear, GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res, List<GridDhtCacheEntry> locked, GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, boolean replicate, String taskName, @Nullable IgniteCacheExpiryPolicy expiry, boolean sndPrevVal) throws GridCacheEntryRemovedException {
GridCacheReturn retVal = null;
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
AffinityTopologyVersion topVer = req.topologyVersion();
boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < req.size(); i++) {
KeyCacheObject k = req.key(i);
GridCacheOperation op = req.operation();
// No GridCacheEntryRemovedException can be thrown.
try {
GridDhtCacheEntry entry = locked.get(i);
GridCacheVersion newConflictVer = req.conflictVersion(i);
long newConflictTtl = req.conflictTtl(i);
long newConflictExpireTime = req.conflictExpireTime(i);
assert !(newConflictVer instanceof GridCacheVersionEx) : newConflictVer;
Object writeVal = op == TRANSFORM ? req.entryProcessor(i) : req.writeValue(i);
Collection<UUID> readers = null;
Collection<UUID> filteredReaders = null;
if (checkReaders) {
readers = entry.readers();
filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
}
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal,
    req.invokeArguments(), writeThrough() && !req.skipStore(), !req.skipStore(),
    sndPrevVal || req.returnValue(), req.keepBinary(), expiry, /*event*/true, /*metrics*/true,
    /*primary*/true, /*verCheck*/false, topVer, req.filter(), replicate ? DR_PRIMARY : DR_NONE,
    newConflictTtl, newConflictExpireTime, newConflictVer, /*conflictResolve*/true, intercept,
    req.subjectId(), taskName, /*prevVal*/null, /*updateCntr*/null, dhtFut);
if (dhtFut != null) {
if (updRes.sendToDht()) {
// Send to backups even in case of remove-remove scenarios.
GridCacheVersionConflictContext<?, ?> conflictCtx = updRes.conflictResolveResult();
if (conflictCtx == null)
newConflictVer = null;
else if (conflictCtx.isMerge())
// Conflict version is discarded in case of merge.
newConflictVer = null;
EntryProcessor<Object, Object, Object> entryProcessor = null;
dhtFut.addWriteEntry(affAssignment, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime(), newConflictVer, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (!F.isEmpty(filteredReaders))
dhtFut.addNearWriteEntries(filteredReaders, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime());
} else {
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter or conflict resolution (will skip write) " + "[entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ']');
}
}
if (hasNear) {
if (updRes.sendToDht()) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
// If put the same value as in request then do not need to send it back.
if (op == TRANSFORM || writeVal != updRes.newValue()) {
res.addNearValue(i, updRes.newValue(), updRes.newTtl(), updRes.conflictExpireTime());
} else
res.addNearTtl(i, updRes.newTtl(), updRes.conflictExpireTime());
if (updRes.newValue() != null) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (F.contains(readers, nearNode.id()))
    // Reader became primary or backup.
    entry.removeReader(nearNode.id(), req.messageId());
else
res.addSkippedIndex(i);
} else
res.addSkippedIndex(i);
}
if (updRes.removeVersion() != null) {
if (deleted == null)
deleted = new ArrayList<>(req.size());
deleted.add(F.t(entry, updRes.removeVersion()));
}
if (op == TRANSFORM) {
assert !req.returnValue();
IgniteBiTuple<Object, Exception> compRes = updRes.computedResult();
if (compRes != null && (compRes.get1() != null || compRes.get2() != null)) {
if (retVal == null)
retVal = new GridCacheReturn(nearNode.isLocal());
retVal.addEntryProcessResult(ctx, k, null, compRes.get1(), compRes.get2(), req.keepBinary());
}
} else {
// Create only once.
if (retVal == null) {
CacheObject ret = updRes.oldValue();
retVal = new GridCacheReturn(ctx, nearNode.isLocal(), req.keepBinary(), req.returnValue() ? ret : null, updRes.success());
}
}
} catch (IgniteCheckedException e) {
res.addFailedKey(k, e);
}
}
return new UpdateSingleResult(retVal, deleted, dhtFut);
}
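The TRANSFORM branch above collects entry-processor results via addEntryProcessResult. A hedged sketch of the public invoke call that produces such results on an atomic cache; the cache name, class name and local node start are illustrative.
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheEntryProcessor;

public class TransformSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("counters");

            cache.put(1, 41);

            // A TRANSFORM operation: the processor's return value is what the primary
            // node packs into the per-key entry-process results of the return value.
            Integer old = cache.invoke(1, new CacheEntryProcessor<Integer, Integer, Integer>() {
                @Override public Integer process(MutableEntry<Integer, Integer> entry, Object... args)
                    throws EntryProcessorException {
                    Integer cur = entry.getValue();

                    entry.setValue(cur == null ? 1 : cur + 1);

                    return cur;
                }
            });

            System.out.println("Value before increment: " + old);
        }
    }
}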