Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class GridDhtTxAbstractEnlistFuture, method continueLoop.
/**
* Iterates over the enlist iterator, applies changes locally and sends them to backups.
*
* @param ignoreCntr {@code True} if the skip counter should be ignored.
*/
private void continueLoop(boolean ignoreCntr) {
if (isDone() || (!ignoreCntr && (SKIP_UPD.getAndIncrement(this) != 0)))
return;
GridDhtCacheAdapter cache = cctx.dhtCache();
EnlistOperation op = it.operation();
AffinityTopologyVersion topVer = tx.topologyVersionSnapshot();
try {
while (true) {
int curPart = -1;
List<ClusterNode> backups = null;
while (hasNext0()) {
Object cur = next0();
KeyCacheObject key = toKey(op, cur);
if (curPart != key.partition())
backups = backupNodes(curPart = key.partition());
assert backups != null;
if (!ensureFreeSlot(key, backups)) {
// Can't advance further at the moment.
peek = cur;
it.beforeDetach();
break;
}
GridDhtCacheEntry entry = cache.entryExx(key);
if (log.isDebugEnabled())
log.debug("Adding entry: " + entry);
assert !entry.detached();
CacheObject val = op.isDeleteOrLock() || op.isInvoke() ? null : cctx.toCacheObject(((IgniteBiTuple) cur).getValue());
GridInvokeValue invokeVal = null;
EntryProcessor entryProc = null;
Object[] invokeArgs = null;
if (op.isInvoke()) {
assert needResult();
invokeVal = (GridInvokeValue) ((IgniteBiTuple) cur).getValue();
entryProc = invokeVal.entryProcessor();
invokeArgs = invokeVal.invokeArgs();
}
assert entryProc != null || !op.isInvoke();
boolean needOldVal = tx.txState().useMvccCaching(cctx.cacheId());
GridCacheUpdateTxResult res;
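// Apply the MVCC update under a checkpoint read lock, retrying if the entry is concurrently removed.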
while (true) {
cctx.shared().database().checkpointReadLock();
try {
switch(op) {
case DELETE:
res = entry.mvccRemove(tx, cctx.localNodeId(), topVer, mvccSnapshot, isMoving(key.partition(), backups), needOldVal, filter, needResult());
break;
case INSERT:
case TRANSFORM:
case UPSERT:
case UPDATE:
res = entry.mvccSet(tx, cctx.localNodeId(), val, entryProc, invokeArgs, 0, topVer, mvccSnapshot, op.cacheOperation(), isMoving(key.partition(), backups), op.noCreate(), needOldVal, filter, needResult(), keepBinary);
break;
case LOCK:
res = entry.mvccLock(tx, mvccSnapshot);
break;
default:
throw new IgniteSQLException("Cannot acquire lock for operation [op=" + op + "]. Operation is unsupported at the moment.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}
break;
} catch (GridCacheEntryRemovedException ignored) {
entry = cache.entryExx(entry.key(), topVer);
} finally {
cctx.shared().database().checkpointReadUnlock();
}
}
IgniteInternalFuture<GridCacheUpdateTxResult> updateFut = res.updateFuture();
final Message val0 = invokeVal != null ? invokeVal : val;
if (updateFut != null) {
if (updateFut.isDone())
res = updateFut.get();
else {
GridDhtCacheEntry entry0 = entry;
List<ClusterNode> backups0 = backups;
it.beforeDetach();
updateFut.listen(new CI1<IgniteInternalFuture<GridCacheUpdateTxResult>>() {
@Override
public void apply(IgniteInternalFuture<GridCacheUpdateTxResult> fut) {
try {
tx.incrementLockCounter();
processEntry(entry0, op, fut.get(), val0, backups0);
continueLoop(true);
} catch (Throwable e) {
onDone(e);
}
}
});
// Can't move further. Exit loop without decrementing the counter.
return;
}
}
tx.incrementLockCounter();
processEntry(entry, op, res, val0, backups);
}
if (!hasNext0()) {
if (!F.isEmpty(batches)) {
// Flush incomplete batches.
// Need to skip batches for nodes where first request (contains tx info) is still in-flight.
// Otherwise, the regular enlist request (without tx info) may beat it to the primary node.
Iterator<Map.Entry<UUID, Batch>> it = batches.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<UUID, Batch> e = it.next();
ConcurrentMap<Integer, Batch> pending0 = pending == null ? null : pending.get(e.getKey());
if (pending0 == null || !pending0.containsKey(FIRST_BATCH_ID)) {
it.remove();
sendBatch(e.getValue());
}
}
}
if (noPendingRequests()) {
onDone(result0());
return;
}
}
if (SKIP_UPD.decrementAndGet(this) == 0)
break;
skipCntr = 1;
}
} catch (Throwable e) {
onDone(e);
if (e instanceof Error)
throw (Error) e;
}
}
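The guard around this loop is worth calling out: SKIP_UPD is an AtomicIntegerFieldUpdater over the skipCntr field, and the increment/decrement pair coalesces concurrent wake-ups so that only one thread drives the loop at a time. Below is a minimal standalone sketch of that pattern; the class and method names are illustrative, not Ignite's actual code.

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

class LoopCoalescer {
    private static final AtomicIntegerFieldUpdater<LoopCoalescer> SKIP_UPD =
        AtomicIntegerFieldUpdater.newUpdater(LoopCoalescer.class, "skipCntr");

    private volatile int skipCntr;

    void continueLoop(boolean ignoreCntr) {
        // A non-zero previous value means another thread is already inside the
        // loop; it will observe the raised counter and run an extra pass for us.
        if (!ignoreCntr && SKIP_UPD.getAndIncrement(this) != 0)
            return;

        while (true) {
            doWork();

            // No new requests arrived while we were working: we are done.
            if (SKIP_UPD.decrementAndGet(this) == 0)
                break;

            // Several requests were queued; collapse them into one extra pass.
            skipCntr = 1;
        }
    }

    void doWork() {
        // One pass of the real work would go here.
    }
}

The listener at the end of the asynchronous branch above calls continueLoop(true) for the same reason: the counter was deliberately left raised when the loop exited early, so the replay must not increment it again.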
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method lockAllAsync.
/**
* @param cacheCtx Cache context.
* @param nearNode Near node.
* @param req Request.
* @param filter0 Filter.
* @return Future.
*/
public IgniteInternalFuture<GridNearLockResponse> lockAllAsync(final GridCacheContext<?, ?> cacheCtx, final ClusterNode nearNode, final GridNearLockRequest req, @Nullable final CacheEntryPredicate[] filter0) {
final List<KeyCacheObject> keys = req.keys();
CacheEntryPredicate[] filter = filter0;
// Set message into thread context.
GridDhtTxLocal tx = null;
try {
int cnt = keys.size();
if (req.inTx()) {
GridCacheVersion dhtVer = ctx.tm().mappedVersion(req.version());
if (dhtVer != null)
tx = ctx.tm().tx(dhtVer);
}
final List<GridCacheEntryEx> entries = new ArrayList<>(cnt);
// Unmarshal filter first.
if (filter == null)
filter = req.filter();
GridDhtLockFuture fut = null;
GridDhtPartitionTopology top = null;
if (req.firstClientRequest()) {
assert nearNode.isClient();
top = topology();
top.readLock();
if (!top.topologyVersionFuture().isDone()) {
top.readUnlock();
return null;
}
}
try {
if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) {
if (log.isDebugEnabled()) {
log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
}
GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.lastTopologyChangeVersion());
return new GridFinishedFuture<>(res);
}
if (req.inTx()) {
if (tx == null) {
tx = new GridDhtTxLocal(ctx.shared(), topology().readyTopologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), /*implicitTx*/ false, /*implicitSingleTx*/ false, ctx.systemTx(), false, ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.timeout(), req.isInvalidate(), !req.skipStore(), false, req.txSize(), null, securitySubjectId(ctx), req.taskNameHash(), req.txLabel(), null);
if (req.syncCommit())
tx.syncMode(FULL_SYNC);
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !tx.init()) {
String msg = "Failed to acquire lock (transaction has been completed): " + req.version();
U.warn(log, msg);
if (tx != null)
tx.rollbackDhtLocal();
return new GridDhtFinishedFuture<>(new IgniteTxRollbackCheckedException(msg));
}
tx.topologyVersion(req.topologyVersion());
}
GridDhtPartitionsExchangeFuture lastFinishedFut = ctx.shared().exchange().lastFinishedFuture();
CacheOperationContext opCtx = ctx.operationContextPerCall();
CacheInvalidStateException validateCacheE = lastFinishedFut.validateCache(ctx, opCtx != null && opCtx.recovery(), req.txRead(), null, keys);
if (validateCacheE != null)
throw validateCacheE;
} else {
fut = new GridDhtLockFuture(ctx, nearNode.id(), req.version(), req.topologyVersion(), cnt, req.txRead(), req.needReturnValue(), req.timeout(), tx, req.threadId(), req.createTtl(), req.accessTtl(), filter, req.skipStore(), req.keepBinary());
// Add before mapping.
if (!ctx.mvcc().addFuture(fut))
throw new IllegalStateException("Duplicate future ID: " + fut);
}
} finally {
if (top != null)
top.readUnlock();
}
boolean timedOut = false;
for (KeyCacheObject key : keys) {
if (timedOut)
break;
while (true) {
// Specify topology version to make sure containment is checked
// based on the requested version, not the latest.
GridDhtCacheEntry entry = entryExx(key, req.topologyVersion());
try {
if (fut != null) {
// This method will add local candidate.
// Entry cannot become obsolete after this method succeeded.
fut.addEntry(key == null ? null : entry);
if (fut.isDone()) {
timedOut = true;
break;
}
}
entries.add(entry);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Got lock request for cancelled lock (will ignore): " + entry);
fut.onError(e);
return new GridDhtFinishedFuture<>(e);
}
}
}
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
ctx.tm().txContext(tx);
if (log.isDebugEnabled())
log.debug("Performing DHT lock [tx=" + tx + ", entries=" + entries + ']');
IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx, entries, req.messageId(), req.txRead(), req.needReturnValue(), req.createTtl(), req.accessTtl(), req.skipStore(), req.keepBinary(), req.nearCache());
final GridDhtTxLocal t = tx;
return new GridDhtEmbeddedFuture(txFut, new C2<GridCacheReturn, Exception, IgniteInternalFuture<GridNearLockResponse>>() {
@Override
public IgniteInternalFuture<GridNearLockResponse> apply(GridCacheReturn o, Exception e) {
if (e != null)
e = U.unwrap(e);
// Transaction can be emptied by asynchronous rollback.
assert e != null || !t.empty();
// Create response while holding locks.
final GridNearLockResponse resp = createLockReply(nearNode, entries, req, t, t.xidVersion(), e);
assert !t.implicit() : t;
assert !t.onePhaseCommit() : t;
sendLockReply(nearNode, t, req, resp);
return new GridFinishedFuture<>(resp);
}
});
} else {
assert fut != null;
// This will send remote messages.
fut.map();
final GridCacheVersion mappedVer = fut.version();
return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, GridNearLockResponse>() {
@Override
public GridNearLockResponse apply(Boolean b, Exception e) {
if (e != null)
e = U.unwrap(e);
else if (!b)
e = new GridCacheLockTimeoutException(req.version());
GridNearLockResponse res = createLockReply(nearNode, entries, req, null, mappedVer, e);
sendLockReply(nearNode, null, req, res);
return res;
}
}, fut);
}
} catch (IgniteCheckedException | RuntimeException e) {
U.error(log, req, e);
if (tx != null) {
try {
tx.rollbackDhtLocal();
} catch (IgniteCheckedException ex) {
U.error(log, "Failed to rollback the transaction: " + tx, ex);
}
}
try {
GridNearLockResponse res = createLockReply(nearNode, Collections.emptyList(), req, tx, tx != null ? tx.xidVersion() : req.version(), e);
sendLockReply(nearNode, null, req, res);
} catch (Exception ex) {
U.error(log, "Failed to send response for request message: " + req, ex);
}
return new GridDhtFinishedFuture<>(new IgniteCheckedException(e));
}
}
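The per-key while (true) loop above is the standard Ignite idiom for working with cache entries that may be concurrently obsoleted: look the entry up, attempt the operation, and on GridCacheEntryRemovedException fetch a fresh entry and retry. A compilable sketch of the idiom with stand-in types (EntryRemovedException, Entry and Cache here are illustrative, not Ignite's API):

class RemovedEntryRetry {
    static class EntryRemovedException extends Exception {}

    interface Entry {
        void addLock() throws EntryRemovedException;
    }

    interface Cache {
        Entry entry(Object key);
    }

    Entry lockEntry(Cache cache, Object key) {
        while (true) {
            // Re-read the entry on every attempt: the previous instance may
            // have been obsoleted concurrently.
            Entry e = cache.entry(key);

            try {
                e.addLock();

                return e;
            } catch (EntryRemovedException ignore) {
                // Entry was removed between lookup and locking; retry with a fresh one.
            }
        }
    }
}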
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class GridDhtTxLocalAdapter, method lockAllAsync.
/**
* @param cacheCtx Cache context.
* @param entries Entries to lock.
* @param msgId Message ID.
* @param read Read flag.
* @param needRetVal Return value flag.
* @param createTtl TTL for create operation.
* @param accessTtl TTL for read operation.
* @param skipStore Skip store flag.
* @param keepBinary Keep binary flag.
* @param nearCache {@code True} if near cache enabled on originating node.
* @return Lock future.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
IgniteInternalFuture<GridCacheReturn> lockAllAsync(GridCacheContext cacheCtx, List<GridCacheEntryEx> entries, long msgId, final boolean read, final boolean needRetVal, long createTtl, long accessTtl, boolean skipStore, boolean keepBinary, boolean nearCache) {
try {
checkValid();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
final GridCacheReturn ret = new GridCacheReturn(localResult(), false);
if (F.isEmpty(entries))
return new GridFinishedFuture<>(ret);
init();
onePhaseCommit(onePhaseCommit);
try {
GridFutureAdapter<GridCacheReturn> enlistFut = new GridFutureAdapter<>();
if (!updateLockFuture(null, enlistFut))
return finishFuture(enlistFut, timedOut() ? timeoutException() : rollbackException(), false);
Set<KeyCacheObject> skipped = null;
try {
AffinityTopologyVersion topVer = topologyVersion();
GridDhtCacheAdapter dhtCache = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();
// Enlist locks into transaction.
for (int i = 0; i < entries.size(); i++) {
GridCacheEntryEx entry = entries.get(i);
KeyCacheObject key = entry.key();
IgniteTxEntry txEntry = entry(entry.txKey());
// First time access.
if (txEntry == null) {
GridDhtCacheEntry cached;
while (true) {
try {
cached = dhtCache.entryExx(key, topVer);
cached.unswap(read);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Get removed entry: " + key);
}
}
addActiveCache(dhtCache.context(), false);
txEntry = addEntry(NOOP, null, null, null, cached, null, CU.empty0(), false, -1L, -1L, null, skipStore, keepBinary, nearCache);
if (read)
txEntry.ttl(accessTtl);
txEntry.cached(cached);
addReader(msgId, cached, txEntry, topVer);
} else {
if (skipped == null)
skipped = new GridLeanSet<>();
skipped.add(key);
}
}
} finally {
finishFuture(enlistFut, null, true);
}
assert pessimistic();
Collection<KeyCacheObject> keys = F.viewReadOnly(entries, CU.entry2Key());
// Acquire locks only after having added operation to the write set.
// Otherwise, during rollback we will not know whether locks need
// to be rolled back.
// Lose all skipped and previously locked keys (we cannot reenter locks here).
final Collection<KeyCacheObject> passedKeys = skipped != null ? F.view(keys, F0.notIn(skipped)) : keys;
if (log.isDebugEnabled())
log.debug("Lock keys: " + passedKeys);
return obtainLockAsync(cacheCtx, ret, passedKeys, read, needRetVal, createTtl, accessTtl, skipStore, keepBinary);
} catch (IgniteCheckedException e) {
setRollbackOnly();
return new GridFinishedFuture<>(e);
}
}
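The skipped set above collects keys that were already enlisted in the transaction so that they are excluded from lock acquisition (locks cannot be reentered here). A standalone sketch of that filtering, using a plain HashSet in place of GridLeanSet and hypothetical inputs:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class SkippedKeys {
    static <K> Collection<K> passedKeys(List<K> keys, Set<K> alreadyEnlisted) {
        Set<K> skipped = null;

        for (K key : keys) {
            if (alreadyEnlisted.contains(key)) {
                // Lazily allocated, mirroring the GridLeanSet above.
                if (skipped == null)
                    skipped = new HashSet<>();

                skipped.add(key);
            }
        }

        if (skipped == null)
            return keys; // Nothing was skipped: lock every key.

        List<K> passed = new ArrayList<>(keys);
        passed.removeAll(skipped);

        return passed;
    }
}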
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class GridDhtGetSingleFuture, method getAsync.
/**
*/
private void getAsync() {
assert part != -1;
String taskName0 = cctx.kernalContext().job().currentTaskName();
if (taskName0 == null)
taskName0 = cctx.kernalContext().task().resolveTaskName(taskNameHash);
final String taskName = taskName0;
IgniteInternalFuture<Boolean> rdrFut = null;
ReaderArguments readerArgs = null;
if (addRdr && !skipVals && !cctx.localNodeId().equals(reader)) {
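// Retry until a live (non-obsolete) entry is obtained and the reader is registered.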
while (true) {
GridDhtCacheEntry e = cache().entryExx(key, topVer);
try {
if (e.obsolete())
continue;
boolean addReader = !e.deleted();
if (addReader) {
e.unswap(false);
// Entry may be removed on touch() if there is no data in cache, but it can be loaded from the store, so we have to add the reader again later.
if (readerArgs == null)
readerArgs = new ReaderArguments(reader, msgId, topVer);
}
// Register reader. If there are active transactions for this entry,
// then will wait for their completion before proceeding.
// TODO: IGNITE-3498:
// TODO: What if any transaction we wait for actually removes this entry?
// TODO: In this case seems like we will be stuck with untracked near entry.
// TODO: To fix, check that reader is contained in the list of readers once
// TODO: again after the returned future completes - if not, try again.
rdrFut = addReader ? e.addReader(reader, msgId, topVer) : null;
break;
} catch (IgniteCheckedException err) {
onDone(err);
return;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when getting a DHT value: " + e);
} finally {
e.touch();
}
}
}
IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> fut;
if (rdrFut == null || rdrFut.isDone()) {
fut = cache().getDhtAllAsync(Collections.singleton(key), readerArgs, readThrough, taskName, expiryPlc, skipVals, recovery, txLbl, mvccSnapshot);
} else {
final ReaderArguments args = readerArgs;
rdrFut.listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
@Override
public void apply(IgniteInternalFuture<Boolean> fut) {
Throwable e = fut.error();
if (e != null) {
onDone(e);
return;
}
IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> fut0 = cache().getDhtAllAsync(Collections.singleton(key), args, readThrough, taskName, expiryPlc, skipVals, recovery, null, mvccSnapshot);
fut0.listen(createGetFutureListener());
}
});
return;
}
if (fut.isDone())
onResult(fut);
else
fut.listen(createGetFutureListener());
}
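Note the two completion paths: when the reader-registration future is already done, the get is issued inline; otherwise it is chained onto the future via a listener. A sketch of the same fast-path/slow-path shape using java.util.concurrent.CompletableFuture (names are illustrative):

import java.util.concurrent.CompletableFuture;

class AsyncGetChain {
    CompletableFuture<String> get(CompletableFuture<Boolean> rdrFut) {
        // Fast path: the reader is already registered, issue the get inline.
        if (rdrFut == null || rdrFut.isDone())
            return doGet();

        // Slow path: run the get only after registration completes;
        // a registration failure propagates to the returned future.
        return rdrFut.thenCompose(ignored -> doGet());
    }

    CompletableFuture<String> doGet() {
        return CompletableFuture.completedFuture("value");
    }
}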
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class GridDhtGetSingleFuture, method toEntryInfo.
/**
* @param map Map to convert.
* @return List of infos.
*/
private GridCacheEntryInfo toEntryInfo(Map<KeyCacheObject, EntryGetResult> map) {
if (map.isEmpty())
return null;
EntryGetResult val = map.get(key);
assert val != null;
GridCacheEntryInfo info = new GridCacheEntryInfo();
info.cacheId(cctx.cacheId());
info.key(key);
info.value(skipVals ? null : (CacheObject) val.value());
info.version(val.version());
info.expireTime(val.expireTime());
info.ttl(val.ttl());
return info;
}
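For reference, the conversion boils down to unpacking the single-key result map into one info object, dropping the value when only existence was requested. A self-contained sketch with stand-in record types (GetResult and EntryInfo here are assumptions, not Ignite classes):

import java.util.Map;

class EntryInfoConverter {
    record GetResult(Object value, long version, long expireTime, long ttl) {}

    record EntryInfo(Object key, Object value, long version, long expireTime, long ttl) {}

    static <K> EntryInfo toEntryInfo(Map<K, GetResult> map, K key, boolean skipVals) {
        if (map.isEmpty())
            return null; // Miss: nothing to report.

        GetResult res = map.get(key);

        assert res != null;

        // Drop the value when the caller asked only for existence (skipVals).
        return new EntryInfo(key, skipVals ? null : res.value(), res.version(), res.expireTime(), res.ttl());
    }
}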