Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
The class GridNearLockFuture, method mapOnTopology.
/**
 * Acquires the topology future and checks its completeness under the read lock. If it is not
 * complete, asynchronously waits for its completion and then tries again.
 *
 * @param remap Remap flag.
 */
synchronized void mapOnTopology(final boolean remap) {
    // We must acquire topology snapshot from the topology version future.
    cctx.topology().readLock();

    try {
        if (cctx.topology().stopping()) {
            onDone(cctx.shared().cache().isCacheRestarting(cctx.name()) ?
                new IgniteCacheRestartingException(cctx.name()) :
                new CacheStoppedException(cctx.name()));

            return;
        }

        GridDhtTopologyFuture fut = cctx.topologyVersionFuture();

        if (fut.isDone()) {
            Throwable err = fut.validateCache(cctx, recovery, read, null, keys);

            if (err != null) {
                onDone(err);

                return;
            }

            AffinityTopologyVersion topVer = fut.topologyVersion();

            if (remap) {
                if (tx != null)
                    tx.onRemap(topVer, true);

                this.topVer = topVer;
            }
            else {
                if (tx != null)
                    tx.topologyVersion(topVer);

                if (this.topVer == null)
                    this.topVer = topVer;
            }

            map(keys, remap, false);

            markInitialized();
        }
        else {
            fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
                @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                    try {
                        fut.get();

                        mapOnTopology(remap);
                    }
                    catch (IgniteCheckedException e) {
                        onDone(e);
                    }
                    finally {
                        cctx.shared().txContextReset();
                    }
                }
            });
        }
    }
    finally {
        cctx.topology().readUnlock();
    }
}
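The method above is the core of a check-or-listen pattern that recurs in every example on this page: read the topology future under the read lock, proceed immediately if it is already complete, and otherwise attach a listener that retries the whole operation once the exchange finishes. Below is a minimal sketch of that pattern using a plain CompletableFuture instead of Ignite's internal futures; the names topologyFuture, performMapping and failOperation are illustrative, not Ignite API.

import java.util.concurrent.CompletableFuture;

class TopologyRetrySketch {
    // Completes with a topology version once the current exchange finishes (hypothetical field).
    private volatile CompletableFuture<Long> topologyFuture = CompletableFuture.completedFuture(1L);

    void mapOnTopology() {
        CompletableFuture<Long> fut = topologyFuture;

        if (fut.isDone()) {
            // Topology is stable: read the version and do the real work right away.
            performMapping(fut.join());
        }
        else {
            // Exchange in progress: retry the whole operation once it completes.
            fut.whenComplete((ver, err) -> {
                if (err != null)
                    failOperation(err);
                else
                    mapOnTopology(); // re-check, the topology may have moved again
            });
        }
    }

    private void performMapping(long topVer) { /* map keys to nodes for topVer */ }

    private void failOperation(Throwable err) { /* complete the user-facing future exceptionally */ }
}

The recursion converges in practice: each retry re-reads the current future, so the operation proceeds as soon as it observes a completed exchange.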
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
The class GridNearOptimisticTxPrepareFutureAdapter, method prepareOnTopology.
/**
 * Acquires the topology version under the read lock and prepares the transaction on it,
 * remapping asynchronously if a topology change is in progress.
 *
 * @param remap Remap flag.
 * @param c Optional closure to run after map.
 */
protected final void prepareOnTopology(final boolean remap, @Nullable final Runnable c) {
    GridDhtTopologyFuture topFut = topologyReadLock();

    AffinityTopologyVersion topVer = null;

    try {
        if (topFut == null) {
            assert isDone();

            return;
        }

        if (topFut.isDone()) {
            if ((topVer = topFut.topologyVersion()) == null && topFut.error() != null) {
                // Prevent stack overflow if topFut has error.
                onDone(topFut.error());

                return;
            }

            if (remap)
                tx.onRemap(topVer, true);
            else
                tx.topologyVersion(topVer);

            if (!remap)
                cctx.mvcc().addFuture(this);
        }
    }
    finally {
        topologyReadUnlock();
    }

    if (topVer != null) {
        IgniteCheckedException err = tx.txState().validateTopology(cctx, tx.writeMap().isEmpty(), topFut);

        if (err != null) {
            onDone(err);

            return;
        }

        if (tx.isRollbackOnly()) {
            onDone(new IgniteTxRollbackCheckedException("Failed to prepare the transaction, " +
                "due to the transaction is marked as rolled back [tx=" + CU.txString(tx) + ']'));

            return;
        }

        prepare0(remap, false);

        if (c != null)
            c.run();
    }
    else {
        cctx.time().waitAsync(topFut, tx.remainingTime(), (e, timedOut) -> {
            if (errorOrTimeoutOnTopologyVersion(e, timedOut))
                return;

            try {
                if (tx.isRollbackOnly()) {
                    onDone(new IgniteTxRollbackCheckedException("Failed to prepare the transaction, " +
                        "due to the transaction is marked as rolled back [tx=" + CU.txString(tx) + ']'));

                    return;
                }

                prepareOnTopology(remap, c);
            }
            finally {
                cctx.txContextReset();
            }
        });
    }
}
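Unlike the lock future above, the prepare path bounds its wait by the transaction's remaining timeout (cctx.time().waitAsync(topFut, tx.remainingTime(), ...)), so a stalled exchange fails the transaction instead of blocking it indefinitely. Here is a sketch of the same idea with JDK primitives (CompletableFuture.orTimeout, Java 9+); remainingTimeMs and the three callbacks are assumed names, not Ignite API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class BoundedTopologyWaitSketch {
    void waitBounded(CompletableFuture<Long> topFut, long remainingTimeMs) {
        topFut
            // Fail the wait once the transaction's remaining timeout elapses.
            .orTimeout(remainingTimeMs, TimeUnit.MILLISECONDS)
            .whenComplete((ver, err) -> {
                Throwable cause = err instanceof CompletionException ? err.getCause() : err;

                if (cause instanceof TimeoutException)
                    onTimeout();    // analogous to errorOrTimeoutOnTopologyVersion(..., timedOut)
                else if (cause != null)
                    onError(cause);
                else
                    retryPrepare(); // analogous to the recursive prepareOnTopology(remap, c)
            });
    }

    private void onTimeout() { /* roll the transaction back with a timeout error */ }

    private void onError(Throwable e) { /* complete the prepare future exceptionally */ }

    private void retryPrepare() { /* re-run the prepare on the new topology */ }
}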
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
The class GridNearTxAbstractEnlistFuture, method mapOnTopology.
/**
 * Maps the enlist operation on the current topology version, retrying asynchronously if an
 * exchange is in progress.
 */
private void mapOnTopology() {
    cctx.topology().readLock();

    boolean topLocked = true;

    try {
        if (cctx.topology().stopping()) {
            onDone(cctx.shared().cache().isCacheRestarting(cctx.name()) ?
                new IgniteCacheRestartingException(cctx.name()) :
                new CacheStoppedException(cctx.name()));

            return;
        }

        GridDhtTopologyFuture fut = cctx.topologyVersionFuture();

        cctx.topology().readUnlock();

        topLocked = false;

        if (fut.isDone()) {
            Throwable err = fut.validateCache(cctx, false, false, null, null);

            if (err != null) {
                onDone(err);

                return;
            }

            AffinityTopologyVersion topVer = fut.topologyVersion();

            tx.topologyVersion(topVer);

            if (this.topVer == null)
                this.topVer = topVer;

            map(false);
        }
        else {
            cctx.time().waitAsync(fut, tx.remainingTime(), (e, timedOut) -> {
                try {
                    if (e != null || timedOut)
                        onDone(timedOut ? tx.timeoutException() : e);
                    else
                        mapOnTopology();
                }
                finally {
                    cctx.shared().txContextReset();
                }
            });
        }
    }
    finally {
        if (topLocked)
            cctx.topology().readUnlock();
    }
}
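Note the topLocked flag: the read lock is held only while checking for a stopping topology and capturing the future; it is released before validation and mapping, and the finally block unlocks only if an exception escaped before the early release. Below is a sketch of that early-unlock idiom with a plain ReadWriteLock standing in for Ignite's topology lock; readTopologySnapshot and mapUsing are assumed names.

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class EarlyUnlockSketch {
    private final ReadWriteLock topLock = new ReentrantReadWriteLock();

    void mapOnTopology() {
        topLock.readLock().lock();

        boolean locked = true;

        try {
            // The only work that actually needs the lock: capture a consistent snapshot.
            Object snapshot = readTopologySnapshot();

            // Release as early as possible; the remaining work runs against the snapshot.
            topLock.readLock().unlock();

            locked = false;

            mapUsing(snapshot);
        }
        finally {
            if (locked) // unlock only if an exception escaped before the early release
                topLock.readLock().unlock();
        }
    }

    private Object readTopologySnapshot() { return new Object(); }

    private void mapUsing(Object snapshot) { /* validation and key mapping, lock-free */ }
}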
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
The class IgniteTxHandler, method prepareNearTx.
/**
 * @param originTx Origin transaction to copy.
 * @param nearNode Node that initiated the transaction.
 * @param req Near prepare request.
 * @return Prepare future, or {@code null} if the operation needs to be retried.
 */
@Nullable private IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTx(
    final GridNearTxLocal originTx,
    final ClusterNode nearNode,
    final GridNearTxPrepareRequest req
) {
    IgniteTxEntry firstEntry;

    try {
        IgniteTxEntry firstWrite = unmarshal(req.writes());
        IgniteTxEntry firstRead = unmarshal(req.reads());

        firstEntry = firstWrite != null ? firstWrite : firstRead;
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }

    GridDhtTxLocal tx = null;

    GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());

    if (mappedVer != null) {
        tx = ctx.tm().tx(mappedVer);

        if (tx == null)
            U.warn(log, "Missing local transaction for mapped near version [nearVer=" + req.version() +
                ", mappedVer=" + mappedVer + ']');
        else {
            if (req.concurrency() == PESSIMISTIC)
                tx.nearFutureId(req.futureId());
        }
    }
    else {
        GridDhtPartitionTopology top = null;

        if (req.firstClientRequest()) {
            assert firstEntry != null : req;
            assert req.concurrency() == OPTIMISTIC : req;
            assert nearNode.isClient() : nearNode;

            top = firstEntry.context().topology();

            top.readLock();

            if (req.allowWaitTopologyFuture()) {
                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                if (!topFut.isDone()) {
                    top.readUnlock();

                    return null;
                }
            }
        }

        try {
            if (top != null) {
                boolean retry = false;

                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                if (!req.allowWaitTopologyFuture() && !topFut.isDone()) {
                    retry = true;

                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology change is in progress, need remap transaction [" +
                            "txId=" + req.version() +
                            ", node=" + nearNode.id() +
                            ", reqTopVer=" + req.topologyVersion() +
                            ", locTopVer=" + top.readyTopologyVersion() +
                            ", req=" + req + ']');
                    }
                }

                if (!retry && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req)) {
                    retry = true;

                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology version mismatch for near prepare, need remap transaction [" +
                            "txId=" + req.version() +
                            ", node=" + nearNode.id() +
                            ", reqTopVer=" + req.topologyVersion() +
                            ", locTopVer=" + top.readyTopologyVersion() +
                            ", req=" + req + ']');
                    }
                }

                if (retry) {
                    GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(
                        req.partition(),
                        req.version(),
                        req.futureId(),
                        req.miniId(),
                        req.version(),
                        req.version(),
                        null,
                        null,
                        top.lastTopologyChangeVersion(),
                        req.onePhaseCommit(),
                        req.deployInfo() != null);

                    try {
                        ctx.io().send(nearNode, res, req.policy());

                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Sent remap response for near prepare [txId=" + req.version() +
                                ", node=" + nearNode.id() + ']');
                        }
                    }
                    catch (ClusterTopologyCheckedException ignored) {
                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Failed to send remap response for near prepare, node failed [" +
                                "txId=" + req.version() + ", node=" + nearNode.id() + ']');
                        }
                    }
                    catch (IgniteCheckedException e) {
                        U.error(txPrepareMsgLog, "Failed to send remap response for near prepare " +
                            "[txId=" + req.version() + ", node=" + nearNode.id() + ", req=" + req + ']', e);
                    }

                    return new GridFinishedFuture<>(res);
                }

                assert topFut.isDone();
            }

            tx = new GridDhtTxLocal(
                ctx,
                req.topologyVersion(),
                nearNode.id(),
                req.version(),
                req.futureId(),
                req.miniId(),
                req.threadId(),
                req.implicitSingle(),
                req.implicitSingle(),
                req.system(),
                req.explicitLock(),
                req.policy(),
                req.concurrency(),
                req.isolation(),
                req.timeout(),
                req.isInvalidate(),
                true,
                req.onePhaseCommit(),
                req.txSize(),
                req.transactionNodes(),
                securitySubjectId(ctx),
                req.taskNameHash(),
                req.txLabel(),
                originTx);

            tx = ctx.tm().onCreated(null, tx);

            if (tx != null)
                tx.topologyVersion(req.topologyVersion());
            else
                U.warn(log, "Failed to create local transaction (was transaction rolled back?) [xid=" +
                    req.version() + ", req=" + req + ']');
        }
        finally {
            if (tx != null)
                req.txState(tx.txState());

            if (top != null)
                top.readUnlock();
        }
    }

    if (tx != null) {
        req.txState(tx.txState());

        if (req.explicitLock())
            tx.explicitLock(true);

        tx.transactionNodes(req.transactionNodes());

        if (req.near())
            tx.nearOnOriginatingNode(true);

        if (req.onePhaseCommit()) {
            assert req.last() : req;

            tx.onePhaseCommit(true);
        }

        if (req.needReturnValue())
            tx.needReturnValue(true);

        IgniteInternalFuture<GridNearTxPrepareResponse> fut = tx.prepareAsync(req);

        if (tx.isRollbackOnly() && !tx.commitOnPrepare()) {
            if (tx.state() != TransactionState.ROLLED_BACK && tx.state() != TransactionState.ROLLING_BACK)
                tx.rollbackDhtLocalAsync();
        }

        final GridDhtTxLocal tx0 = tx;

        fut.listen(new CI1<IgniteInternalFuture<?>>() {
            @Override public void apply(IgniteInternalFuture<?> txFut) {
                try {
                    txFut.get();
                }
                catch (IgniteCheckedException e) {
                    // Just in case.
                    tx0.setRollbackOnly();

                    if (!X.hasCause(e, IgniteTxOptimisticCheckedException.class) &&
                        !X.hasCause(e, IgniteFutureCancelledException.class) &&
                        !ctx.kernalContext().isStopping())
                        U.error(log, "Failed to prepare DHT transaction: " + tx0, e);
                }
            }
        });

        return fut;
    }
    else
        return new GridFinishedFuture<>((GridNearTxPrepareResponse)null);
}
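The server side here chooses between preparing and asking the near node to remap: if an exchange is in progress and the request does not allow waiting, or if needRemap(...) reports a version mismatch, it answers with a response carrying top.lastTopologyChangeVersion() so the client can retry on the new topology. A compressed sketch of that decision follows, with hypothetical types; Ignite's actual version comparison is richer than the equality check below.

class RemapDecisionSketch {
    /** Outcome of checking a near prepare request against the local topology (hypothetical). */
    enum Decision {
        PREPARE,
        REMAP
    }

    Decision check(long reqTopVer, long readyTopVer, boolean exchangeInProgress, boolean allowWait) {
        // An exchange is running and the sender did not ask us to wait it out.
        if (exchangeInProgress && !allowWait)
            return Decision.REMAP;

        // The topology moved while the request was in flight.
        if (reqTopVer != readyTopVer)
            return Decision.REMAP;

        return Decision.PREPARE;
    }
}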
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
The class DataStreamProcessor, method localUpdate.
/**
 * Performs a local data streamer update.
 *
 * @param nodeId Node ID.
 * @param req Request.
 * @param updater Updater.
 * @param topic Topic.
 */
private void localUpdate(final UUID nodeId,
    final DataStreamerRequest req,
    final StreamReceiver<K, V> updater,
    final Object topic) {
    final boolean allowOverwrite = !(updater instanceof DataStreamerImpl.IsolatedUpdater);

    try {
        GridCacheAdapter cache = ctx.cache().internalCache(req.cacheName());

        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed: " + req.cacheName());

        GridCacheContext cctx = cache.context();

        DataStreamerUpdateJob job = null;

        GridFutureAdapter waitFut = null;

        if (!allowOverwrite)
            cctx.topology().readLock();

        GridDhtTopologyFuture topWaitFut = null;

        try {
            Exception remapErr = null;

            AffinityTopologyVersion streamerFutTopVer = null;

            if (!allowOverwrite) {
                GridDhtTopologyFuture topFut = cctx.topologyVersionFuture();

                AffinityTopologyVersion topVer = topFut.isDone() ? topFut.topologyVersion() :
                    topFut.initialVersion();

                if (topVer.compareTo(req.topologyVersion()) > 0) {
                    remapErr = new ClusterTopologyCheckedException("DataStreamer will retry " +
                        "data transfer at stable topology [reqTop=" + req.topologyVersion() +
                        ", topVer=" + topFut.initialVersion() + ", node=remote]");
                }
                else if (!topFut.isDone())
                    topWaitFut = topFut;
                else
                    streamerFutTopVer = topFut.topologyVersion();
            }

            if (remapErr != null) {
                sendResponse(nodeId, topic, req.requestId(), remapErr, req.forceLocalDeployment());

                return;
            }
            else if (topWaitFut == null) {
                job = new DataStreamerUpdateJob(ctx, log, req.cacheName(), req.entries(),
                    req.ignoreDeploymentOwnership(), req.skipStore(), req.keepBinary(), updater);

                waitFut = allowOverwrite ? null : cctx.mvcc().addDataStreamerFuture(streamerFutTopVer);
            }
        }
        finally {
            if (!allowOverwrite)
                cctx.topology().readUnlock();
        }

        if (topWaitFut != null) {
            // Need call 'listen' after topology read lock is released.
            topWaitFut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> e) {
                    localUpdate(nodeId, req, updater, topic);
                }
            });

            return;
        }

        try {
            job.call();

            sendResponse(nodeId, topic, req.requestId(), null, req.forceLocalDeployment());
        }
        finally {
            if (waitFut != null)
                waitFut.onDone();
        }
    }
    catch (Throwable e) {
        sendResponse(nodeId, topic, req.requestId(), e, req.forceLocalDeployment());

        if (e instanceof Error)
            throw (Error)e;
    }
}
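The comment about calling 'listen' after the read lock is released is the key detail here: if the future completed while the lock was held, the listener could fire synchronously on the current thread and re-enter localUpdate while the topology read lock is still held, risking deadlock against a pending exclusive lock. The sketch below shows the idiom of capturing the future under the lock but attaching the callback outside it; exchangeFut and update are assumed names, not Ignite API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ListenAfterUnlockSketch {
    private final ReentrantReadWriteLock topLock = new ReentrantReadWriteLock();

    // Completes with a topology version once the current exchange finishes (hypothetical field).
    private volatile CompletableFuture<Long> exchangeFut = CompletableFuture.completedFuture(0L);

    void update() {
        CompletableFuture<Long> waitFut = null;

        topLock.readLock().lock();

        try {
            if (!exchangeFut.isDone())
                waitFut = exchangeFut; // capture under the lock, attach the callback later
        }
        finally {
            topLock.readLock().unlock();
        }

        if (waitFut != null) {
            // Safe: even if the callback runs synchronously here, the lock is no longer held.
            waitFut.whenComplete((ver, err) -> update());

            return;
        }

        // ... perform the streamer update against the stable topology ...
    }
}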