Use of org.apache.ignite.internal.processors.cache.version.GridCacheVersion in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method lockAllAsync.
/**
* @param cacheCtx Cache context.
* @param nearNode Near node.
* @param req Request.
* @param filter0 Filter.
* @return Future.
*/
public IgniteInternalFuture<GridNearLockResponse> lockAllAsync(final GridCacheContext<?, ?> cacheCtx, final ClusterNode nearNode, final GridNearLockRequest req, @Nullable final CacheEntryPredicate[] filter0) {
final List<KeyCacheObject> keys = req.keys();
CacheEntryPredicate[] filter = filter0;
// Set message into thread context.
GridDhtTxLocal tx = null;
try {
int cnt = keys.size();
if (req.inTx()) {
GridCacheVersion dhtVer = ctx.tm().mappedVersion(req.version());
if (dhtVer != null)
tx = ctx.tm().tx(dhtVer);
}
final List<GridCacheEntryEx> entries = new ArrayList<>(cnt);
// Unmarshal filter first.
if (filter == null)
filter = req.filter();
GridDhtLockFuture fut = null;
GridDhtPartitionTopology top = null;
if (req.firstClientRequest()) {
assert nearNode.isClient();
top = topology();
top.readLock();
if (!top.topologyVersionFuture().isDone()) {
top.readUnlock();
return null;
}
}
try {
if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) {
if (log.isDebugEnabled()) {
log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
}
GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.lastTopologyChangeVersion());
return new GridFinishedFuture<>(res);
}
if (req.inTx()) {
if (tx == null) {
tx = new GridDhtTxLocal(ctx.shared(), topology().readyTopologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), /*implicitTx*/false, /*implicitSingleTx*/false, ctx.systemTx(), false, ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.timeout(), req.isInvalidate(), !req.skipStore(), false, req.txSize(), null, securitySubjectId(ctx), req.taskNameHash(), req.txLabel(), null);
if (req.syncCommit())
tx.syncMode(FULL_SYNC);
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !tx.init()) {
String msg = "Failed to acquire lock (transaction has been completed): " + req.version();
U.warn(log, msg);
if (tx != null)
tx.rollbackDhtLocal();
return new GridDhtFinishedFuture<>(new IgniteTxRollbackCheckedException(msg));
}
tx.topologyVersion(req.topologyVersion());
}
GridDhtPartitionsExchangeFuture lastFinishedFut = ctx.shared().exchange().lastFinishedFuture();
CacheOperationContext opCtx = ctx.operationContextPerCall();
CacheInvalidStateException validateCacheE = lastFinishedFut.validateCache(ctx, opCtx != null && opCtx.recovery(), req.txRead(), null, keys);
if (validateCacheE != null)
throw validateCacheE;
} else {
fut = new GridDhtLockFuture(ctx, nearNode.id(), req.version(), req.topologyVersion(), cnt, req.txRead(), req.needReturnValue(), req.timeout(), tx, req.threadId(), req.createTtl(), req.accessTtl(), filter, req.skipStore(), req.keepBinary());
// Add before mapping.
if (!ctx.mvcc().addFuture(fut))
throw new IllegalStateException("Duplicate future ID: " + fut);
}
} finally {
if (top != null)
top.readUnlock();
}
boolean timedOut = false;
for (KeyCacheObject key : keys) {
if (timedOut)
break;
while (true) {
// Specify topology version to make sure containment is checked
// based on the requested version, not the latest.
GridDhtCacheEntry entry = entryExx(key, req.topologyVersion());
try {
if (fut != null) {
// This method will add local candidate.
// Entry cannot become obsolete after this method succeeded.
fut.addEntry(key == null ? null : entry);
if (fut.isDone()) {
timedOut = true;
break;
}
}
entries.add(entry);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Got lock request for cancelled lock (will ignore): " + entry);
fut.onError(e);
return new GridDhtFinishedFuture<>(e);
}
}
}
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
ctx.tm().txContext(tx);
if (log.isDebugEnabled())
log.debug("Performing DHT lock [tx=" + tx + ", entries=" + entries + ']');
IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx, entries, req.messageId(), req.txRead(), req.needReturnValue(), req.createTtl(), req.accessTtl(), req.skipStore(), req.keepBinary(), req.nearCache());
final GridDhtTxLocal t = tx;
return new GridDhtEmbeddedFuture(txFut, new C2<GridCacheReturn, Exception, IgniteInternalFuture<GridNearLockResponse>>() {
@Override
public IgniteInternalFuture<GridNearLockResponse> apply(GridCacheReturn o, Exception e) {
if (e != null)
e = U.unwrap(e);
// Transaction can be emptied by asynchronous rollback.
assert e != null || !t.empty();
// Create response while holding locks.
final GridNearLockResponse resp = createLockReply(nearNode, entries, req, t, t.xidVersion(), e);
assert !t.implicit() : t;
assert !t.onePhaseCommit() : t;
sendLockReply(nearNode, t, req, resp);
return new GridFinishedFuture<>(resp);
}
});
} else {
assert fut != null;
// This will send remote messages.
fut.map();
final GridCacheVersion mappedVer = fut.version();
return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, GridNearLockResponse>() {
@Override
public GridNearLockResponse apply(Boolean b, Exception e) {
if (e != null)
e = U.unwrap(e);
else if (!b)
e = new GridCacheLockTimeoutException(req.version());
GridNearLockResponse res = createLockReply(nearNode, entries, req, null, mappedVer, e);
sendLockReply(nearNode, null, req, res);
return res;
}
}, fut);
}
} catch (IgniteCheckedException | RuntimeException e) {
U.error(log, req, e);
if (tx != null) {
try {
tx.rollbackDhtLocal();
} catch (IgniteCheckedException ex) {
U.error(log, "Failed to rollback the transaction: " + tx, ex);
}
}
try {
GridNearLockResponse res = createLockReply(nearNode, Collections.emptyList(), req, tx, tx != null ? tx.xidVersion() : req.version(), e);
sendLockReply(nearNode, null, req, res);
} catch (Exception ex) {
U.error(log, "Failed to send response for request message: " + req, ex);
}
return new GridDhtFinishedFuture<>(new IgniteCheckedException(e));
}
}
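The notable part of lockAllAsync above is how the asynchronous lock acquisition is folded into a single reply future: both the transactional path (tx.lockAllAsync chained through GridDhtEmbeddedFuture and a C2 closure) and the non-transactional path (the mapped GridDhtLockFuture) convert either a result or an exception into exactly one GridNearLockResponse. Below is a minimal, self-contained analogue of that composition using only the JDK's CompletableFuture; LockReplyCompositionSketch, LockReply and acquireLockAsync are illustrative stand-ins, not Ignite APIs.

import java.util.concurrent.CompletableFuture;

// Self-contained analogue of the "embedded future + closure" composition used above.
// All names here are illustrative stand-ins, not Ignite APIs.
public class LockReplyCompositionSketch {
    static final class LockReply {
        final boolean granted;
        final Throwable err;

        LockReply(boolean granted, Throwable err) {
            this.granted = granted;
            this.err = err;
        }

        @Override public String toString() {
            return "LockReply [granted=" + granted + ", err=" + err + ']';
        }
    }

    // Stand-in for the asynchronous lock acquisition (GridDhtLockFuture / tx.lockAllAsync above).
    static CompletableFuture<Boolean> acquireLockAsync() {
        return CompletableFuture.supplyAsync(() -> true);
    }

    public static void main(String[] args) {
        CompletableFuture<LockReply> replyFut = acquireLockAsync()
            // Like the C2 closure above: fold both the success value and the failure into a single
            // reply object, so the response is created exactly once whatever the outcome.
            .handle((locked, err) -> {
                if (err != null)
                    return new LockReply(false, err);

                if (!locked)
                    return new LockReply(false, new IllegalStateException("Lock wait timed out."));

                return new LockReply(true, null);
            });

        System.out.println(replyFut.join());
    }
}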
Use of org.apache.ignite.internal.processors.cache.version.GridCacheVersion in project ignite by apache.
The class GridDhtTxPrepareFuture, method addDhtValues.
/**
* @param res Response being sent.
*/
private void addDhtValues(GridNearTxPrepareResponse res) {
// Interceptor on near node needs old values to execute callbacks.
if (req.writes() != null) {
for (IgniteTxEntry e : req.writes()) {
IgniteTxEntry txEntry = tx.entry(e.txKey());
assert txEntry != null : "Missing tx entry for key [tx=" + tx + ", key=" + e.txKey() + ']';
GridCacheContext cacheCtx = txEntry.context();
while (true) {
try {
GridCacheEntryEx entry = txEntry.cached();
GridCacheVersion dhtVer = entry.version();
CacheObject val0 = entry.valueBytes();
if (val0 != null)
res.addOwnedValue(txEntry.txKey(), dhtVer, val0);
break;
} catch (GridCacheEntryRemovedException ignored) {
// Retry.
txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion()));
}
}
}
}
for (Map.Entry<IgniteTxKey, GridCacheVersion> ver : dhtVerMap.entrySet()) {
IgniteTxEntry txEntry = tx.entry(ver.getKey());
if (res.hasOwnedValue(ver.getKey()))
continue;
assert txEntry != null : ver;
GridCacheContext cacheCtx = txEntry.context();
while (true) {
try {
GridCacheEntryEx entry = txEntry.cached();
GridCacheVersion dhtVer = entry.version();
if (ver.getValue() == null || !ver.getValue().equals(dhtVer)) {
CacheObject val0 = entry.valueBytes();
res.addOwnedValue(txEntry.txKey(), dhtVer, val0);
}
break;
} catch (GridCacheEntryRemovedException ignored) {
// Retry.
txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion()));
}
}
}
}
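addDhtValues attaches an entry's current value to the prepare response only when the version the near node already holds is missing or differs from the entry's DHT version, so values the near node is known to have are not shipped again. A minimal sketch of that filtering rule follows, assuming plain String keys and Long versions as stand-ins for IgniteTxKey and GridCacheVersion.

import java.util.HashMap;
import java.util.Map;

// Sketch of the version-comparison rule applied in addDhtValues; all types and names are
// illustrative stand-ins, not Ignite types.
public class OwnedValueFilterSketch {
    public static void main(String[] args) {
        // Current DHT versions and values held by the primary node.
        Map<String, Long> dhtVersions = Map.of("k1", 2L, "k2", 5L, "k3", 7L);
        Map<String, String> currentValues = Map.of("k1", "v1", "k2", "v2", "k3", "v3");

        // Versions the near node reported for the same keys (k2 is already up to date, k3 is unknown).
        Map<String, Long> nearKnownVersions = Map.of("k1", 1L, "k2", 5L);

        Map<String, String> ownedValuesInResponse = new HashMap<>();

        for (Map.Entry<String, Long> e : dhtVersions.entrySet()) {
            Long nearVer = nearKnownVersions.get(e.getKey());

            // Ship the value only if the near node has no version for the key or an outdated one.
            if (nearVer == null || !nearVer.equals(e.getValue()))
                ownedValuesInResponse.put(e.getKey(), currentValues.get(e.getKey()));
        }

        // Prints {k1=v1, k3=v3} (order may vary): k2 is skipped because its versions match.
        System.out.println(ownedValuesInResponse);
    }
}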
Use of org.apache.ignite.internal.processors.cache.version.GridCacheVersion in project ignite by apache.
The class GridDhtTxFinishFuture, method addDiagnosticRequest.
/**
* {@inheritDoc}
*/
@Override
public void addDiagnosticRequest(IgniteDiagnosticPrepareContext ctx) {
if (!isDone()) {
for (IgniteInternalFuture fut : futures()) {
if (!fut.isDone()) {
if (MiniFuture.class.isInstance(fut)) {
MiniFuture f = (MiniFuture) fut;
if (!f.node().isLocal()) {
GridCacheVersion dhtVer = tx.xidVersion();
GridCacheVersion nearVer = tx.nearXidVersion();
ctx.remoteTxInfo(f.node().id(), dhtVer, nearVer, "GridDhtTxFinishFuture waiting for response [node=" + f.node().id() + ", topVer=" + tx.topologyVersion() + ", dhtVer=" + dhtVer + ", nearVer=" + nearVer + ", futId=" + futId + ", miniId=" + f.futId + ", tx=" + tx + ']');
return;
}
} else if (fut instanceof MvccFuture) {
MvccFuture f = (MvccFuture) fut;
if (!cctx.localNodeId().equals(f.coordinatorNodeId())) {
ctx.basicInfo(f.coordinatorNodeId(), "GridDhtTxFinishFuture waiting for mvcc coordinator reply [mvccCrdNode=" + f.coordinatorNodeId() + ", loc=" + f.coordinatorNodeId().equals(cctx.localNodeId()) + ']');
return;
}
}
}
}
}
}
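addDiagnosticRequest walks the pending sub-futures, branches on their concrete type (MiniFuture vs. MvccFuture), and records diagnostic context for the first one still waiting on a remote party. A minimal sketch of that dispatch follows; PendingFuture, RemoteReplyFuture and CoordinatorFuture are illustrative stand-ins rather than Ignite types.

import java.util.Arrays;
import java.util.List;

// Sketch of the type dispatch in addDiagnosticRequest; all types here are illustrative stand-ins.
public class DiagnosticScanSketch {
    interface PendingFuture {
        boolean isDone();
    }

    static class RemoteReplyFuture implements PendingFuture {
        final String nodeId;

        RemoteReplyFuture(String nodeId) {
            this.nodeId = nodeId;
        }

        @Override public boolean isDone() {
            return false;
        }
    }

    static class CoordinatorFuture implements PendingFuture {
        final String coordinatorId;

        CoordinatorFuture(String coordinatorId) {
            this.coordinatorId = coordinatorId;
        }

        @Override public boolean isDone() {
            return false;
        }
    }

    public static void main(String[] args) {
        List<PendingFuture> futures = Arrays.<PendingFuture>asList(
            new RemoteReplyFuture("node-1"),
            new CoordinatorFuture("crd-1"));

        for (PendingFuture fut : futures) {
            // Completed sub-futures need no diagnostics.
            if (fut.isDone())
                continue;

            // Branch on the concrete type, like the MiniFuture / MvccFuture checks above,
            // and report only the first future still waiting on a remote party.
            if (fut instanceof RemoteReplyFuture) {
                System.out.println("Waiting for response from node " + ((RemoteReplyFuture)fut).nodeId);

                return;
            }
            else if (fut instanceof CoordinatorFuture) {
                System.out.println("Waiting for MVCC coordinator " + ((CoordinatorFuture)fut).coordinatorId);

                return;
            }
        }
    }
}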
Use of org.apache.ignite.internal.processors.cache.version.GridCacheVersion in project ignite by apache.
The class GridDhtTxPrepareRequest, method finishUnmarshal.
/**
* {@inheritDoc}
*/
@Override
public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
super.finishUnmarshal(ctx, ldr);
if (ownedKeys != null) {
assert ownedKeys.size() == ownedVals.size();
owned = U.newHashMap(ownedKeys.size());
Iterator<IgniteTxKey> keyIter = ownedKeys.iterator();
Iterator<GridCacheVersion> valIter = ownedVals.iterator();
while (keyIter.hasNext()) {
IgniteTxKey key = keyIter.next();
// Advance both iterators in lockstep so a missing cache context cannot desynchronize keys and versions.
GridCacheVersion ver = valIter.next();
GridCacheContext<?, ?> cacheCtx = ctx.cacheContext(key.cacheId());
if (cacheCtx != null) {
key.finishUnmarshal(cacheCtx, ldr);
owned.put(key, ver);
}
}
}
if (nearWrites != null) {
for (Iterator<IgniteTxEntry> it = nearWrites.iterator(); it.hasNext(); ) {
IgniteTxEntry e = it.next();
GridCacheContext<?, ?> cacheCtx = ctx.cacheContext(e.cacheId());
if (cacheCtx == null) {
it.remove();
if (nearWritesCacheMissed == null)
nearWritesCacheMissed = new ArrayList<>();
nearWritesCacheMissed.add(e.txKey());
} else {
e.context(cacheCtx);
e.unmarshal(ctx, true, ldr);
}
}
}
}
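finishUnmarshal rebuilds the owned map from two aligned lists (ownedKeys and ownedVals) that were marshalled separately, dropping keys whose cache is no longer started locally. A minimal sketch of that lockstep rebuild follows, with String keys, Long versions and a startedCaches set standing in for the Ignite types.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Sketch of the lockstep rebuild performed in finishUnmarshal: the owned map travels as two
// aligned lists and is reassembled on the receiver. All names here are illustrative stand-ins.
public class OwnedMapRebuildSketch {
    public static void main(String[] args) {
        List<String> ownedKeys = Arrays.asList("cacheA:k1", "cacheB:k2", "cacheC:k3");
        List<Long> ownedVers = Arrays.asList(10L, 11L, 12L);

        assert ownedKeys.size() == ownedVers.size();

        // Caches actually started on this node; cacheC has been stopped concurrently.
        Set<String> startedCaches = Set.of("cacheA", "cacheB");

        Map<String, Long> owned = new HashMap<>(ownedKeys.size());

        Iterator<String> keyIter = ownedKeys.iterator();
        Iterator<Long> verIter = ownedVers.iterator();

        while (keyIter.hasNext()) {
            // Advance both iterators together so keys and versions stay paired.
            String key = keyIter.next();
            Long ver = verIter.next();

            // Keep the entry only if its cache is still known locally, mirroring the
            // ctx.cacheContext(key.cacheId()) != null check above.
            if (startedCaches.contains(key.substring(0, key.indexOf(':'))))
                owned.put(key, ver);
        }

        // Prints {cacheA:k1=10, cacheB:k2=11} (order may vary).
        System.out.println(owned);
    }
}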
Use of org.apache.ignite.internal.processors.cache.version.GridCacheVersion in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method createLockReply.
/**
* @param nearNode Near node.
* @param entries Entries.
* @param req Lock request.
* @param tx Transaction.
* @param mappedVer Mapped version.
* @param err Error.
* @return Response.
*/
private GridNearLockResponse createLockReply(ClusterNode nearNode, List<GridCacheEntryEx> entries, GridNearLockRequest req, @Nullable GridDhtTxLocalAdapter tx, GridCacheVersion mappedVer, Throwable err) {
assert mappedVer != null;
assert tx == null || tx.xidVersion().equals(mappedVer);
try {
// All subsequent lock requests must use actual topology version to avoid mapping on invalid primaries.
AffinityTopologyVersion clientRemapVer = req.firstClientRequest() && tx != null && topology().readyTopologyVersion().after(req.topologyVersion()) ? topology().readyTopologyVersion() : null;
// Send reply back to originating near node.
GridNearLockResponse res = new GridNearLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), tx != null && tx.onePhaseCommit(), entries.size(), err, clientRemapVer, ctx.deploymentEnabled(), clientRemapVer != null);
if (err == null) {
res.pending(localDhtPendingVersions(entries, mappedVer));
// We have to add completed versions for cases when nearLocal and remote transactions
// execute concurrently.
IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(req.version());
res.completedVersions(versPair.get1(), versPair.get2());
int i = 0;
for (ListIterator<GridCacheEntryEx> it = entries.listIterator(); it.hasNext(); ) {
GridCacheEntryEx e = it.next();
assert e != null;
while (true) {
try {
// Don't return anything for invalid partitions.
if (tx == null || !tx.isRollbackOnly()) {
GridCacheVersion dhtVer = req.dhtVersion(i);
GridCacheVersion ver = e.version();
boolean ret = req.returnValue(i) || dhtVer == null || !dhtVer.equals(ver);
CacheObject val = null;
if (ret) {
val = e.innerGet(null, tx, /*read-through*/false, /*update-metrics*/true, /*event notification*/req.returnValue(i), null, tx != null ? tx.resolveTaskName() : null, null, req.keepBinary());
}
assert e.lockedBy(mappedVer) || ctx.mvcc().isRemoved(e.context(), mappedVer) || tx != null && tx.isRollbackOnly() : "Entry does not own lock for tx [locNodeId=" + ctx.localNodeId() + ", entry=" + e + ", mappedVer=" + mappedVer + ", ver=" + ver + ", tx=" + CU.txString(tx) + ", req=" + req + ']';
boolean filterPassed = false;
if (tx != null && tx.onePhaseCommit()) {
IgniteTxEntry writeEntry = tx.entry(ctx.txKey(e.key()));
assert writeEntry != null : "Missing tx entry for locked cache entry: " + e;
filterPassed = writeEntry.filtersPassed();
}
if (ret && val == null)
val = e.valueBytes(null);
// We include values into response since they are required for local
// calls and won't be serialized. We are also including DHT version.
res.addValueBytes(ret ? val : null, filterPassed, ver, mappedVer);
} else {
// We include values into response since they are required for local
// calls and won't be serialized. We are also including DHT version.
res.addValueBytes(null, false, e.version(), mappedVer);
}
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when sending reply to DHT lock request " + "(will retry): " + e);
e = entryExx(e.key());
it.set(e);
}
}
i++;
}
}
return res;
} catch (IgniteCheckedException e) {
U.error(log, "Failed to get value for lock reply message for node [node=" + U.toShortString(nearNode) + ", req=" + req + ']', e);
return new GridNearLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), false, entries.size(), e, null, ctx.deploymentEnabled(), false);
}
}
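createLockReply reads each locked entry inside a retry loop: if the entry turns out to be obsolete (GridCacheEntryRemovedException), a fresh entry is obtained via entryExx and swapped into the list through ListIterator.set before the read is retried. A minimal sketch of that retry-and-replace pattern follows; Entry, EntryRemovedException and refresh() are illustrative stand-ins, not Ignite APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

// Sketch of the retry-and-replace pattern in createLockReply; all names are illustrative stand-ins.
public class EntryRetrySketch {
    static class EntryRemovedException extends Exception {}

    static class Entry {
        final String key;
        final boolean obsolete;

        Entry(String key, boolean obsolete) {
            this.key = key;
            this.obsolete = obsolete;
        }

        String read() throws EntryRemovedException {
            // An obsolete entry behaves like one hit by GridCacheEntryRemovedException above.
            if (obsolete)
                throw new EntryRemovedException();

            return "value-of-" + key;
        }
    }

    // Stand-in for entryExx(e.key()): returns a fresh, non-obsolete entry for the same key.
    static Entry refresh(Entry e) {
        return new Entry(e.key, false);
    }

    public static void main(String[] args) {
        List<Entry> entries = new ArrayList<>(Arrays.asList(new Entry("k1", false), new Entry("k2", true)));

        for (ListIterator<Entry> it = entries.listIterator(); it.hasNext(); ) {
            Entry e = it.next();

            while (true) {
                try {
                    System.out.println(e.key + " -> " + e.read());

                    break;
                }
                catch (EntryRemovedException ignore) {
                    // Obsolete entry: fetch a fresh one, replace it in the list, and retry the read.
                    e = refresh(e);

                    it.set(e);
                }
            }
        }
    }
}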