Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridNearTxFinishFuture, method finish.
/**
* @param miniId Mini future ID.
* @param m Mapping.
* @param commit Commit flag.
*/
private void finish(int miniId, GridDistributedTxMapping m, boolean commit) {
    ClusterNode n = m.primary();

    assert !m.empty() : m;

    CacheWriteSynchronizationMode syncMode = tx.syncMode();

    if (m.explicitLock())
        syncMode = FULL_SYNC;

    // Version to be added in completed versions on primary node.
    GridCacheVersion completedVer = !commit && tx.timeout() > 0 ? tx.xidVersion() : null;

    GridNearTxFinishRequest req = new GridNearTxFinishRequest(
        futId,
        tx.xidVersion(),
        tx.threadId(),
        commit,
        tx.isInvalidate(),
        tx.system(),
        tx.ioPolicy(),
        syncMode,
        m.explicitLock(),
        tx.storeEnabled(),
        tx.topologyVersion(),
        // Reuse 'baseVersion' to avoid adding new fields to the message.
        completedVer,
        null,
        null,
        tx.size(),
        tx.subjectId(),
        tx.taskNameHash(),
        tx.activeCachesDeploymentEnabled());

    // If this is the primary node for the keys.
    if (n.isLocal()) {
        req.miniId(miniId);

        IgniteInternalFuture<IgniteInternalTx> fut = cctx.tm().txHandler().finish(n.id(), tx, req);

        // Add new future.
        if (fut != null && syncMode == FULL_SYNC)
            add(fut);
    }
    else {
        FinishMiniFuture fut = new FinishMiniFuture(miniId, m);

        req.miniId(fut.futureId());

        // Append new future.
        add(fut);

        if (tx.pessimistic())
            cctx.tm().beforeFinishRemote(n.id(), tx.threadId());

        try {
            cctx.io().send(n, req, tx.ioPolicy());

            if (msgLog.isDebugEnabled()) {
                msgLog.debug("Near finish fut, sent request [" +
                    "txId=" + tx.nearXidVersion() +
                    ", node=" + n.id() + ']');
            }

            boolean wait = syncMode != FULL_ASYNC;

            // If we don't wait for the result, mark the future as done.
            if (!wait)
                fut.onDone();
        }
        catch (ClusterTopologyCheckedException ignored) {
            // Remove previous mapping.
            mappings.remove(m.primary().id());

            fut.onNodeLeft(n.id(), false);
        }
        catch (IgniteCheckedException e) {
            if (msgLog.isDebugEnabled()) {
                msgLog.debug("Near finish fut, failed to send request [" +
                    "txId=" + tx.nearXidVersion() +
                    ", node=" + n.id() +
                    ", err=" + e + ']');
            }

            // Fail the whole thing.
            fut.onDone(e);
        }
    }
}
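Stripped to its essentials, the handling pattern here is: a topology exception on send is an expected event that triggers node-left failover, while any other checked exception fails the whole future. A minimal sketch of that pattern, with hypothetical sendRequest, onNodeLeft and failFuture stand-ins (not Ignite API):

import java.util.UUID;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class FinishSendSketch {
    /** Sends a finish request, treating the target node's departure as recoverable. */
    void sendFinish(UUID nodeId) {
        try {
            sendRequest(nodeId); // Hypothetical: may throw if the target node left.
        }
        catch (ClusterTopologyCheckedException ignored) {
            // Expected: the primary left; node-left handling remaps or completes the future.
            onNodeLeft(nodeId);
        }
        catch (IgniteCheckedException e) {
            // Unexpected communication failure: fail the whole future.
            failFuture(e);
        }
    }

    void sendRequest(UUID nodeId) throws IgniteCheckedException { /* ... */ }

    void onNodeLeft(UUID nodeId) { /* ... */ }

    void failFuture(IgniteCheckedException e) { /* ... */ }
}

The dedicated catch clause works because ClusterTopologyCheckedException extends IgniteCheckedException, so the more specific handler must come first.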
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridPartitionedSingleGetFuture, method map.
/**
* @param topVer Topology version.
*/
@SuppressWarnings("unchecked")
private void map(final AffinityTopologyVersion topVer) {
    ClusterNode node = mapKeyToNode(topVer);

    if (node == null) {
        assert isDone() : this;

        return;
    }

    if (isDone())
        return;

    if (node.isLocal()) {
        final GridDhtFuture<GridCacheEntryInfo> fut = cctx.dht().getDhtSingleAsync(node.id(),
            -1,
            key,
            false,
            readThrough,
            topVer,
            subjId,
            taskName == null ? 0 : taskName.hashCode(),
            expiryPlc,
            skipVals,
            recovery);

        final Collection<Integer> invalidParts = fut.invalidPartitions();

        if (!F.isEmpty(invalidParts)) {
            AffinityTopologyVersion updTopVer = cctx.shared().exchange().readyAffinityVersion();

            assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology " +
                "version did not change [topVer=" + topVer + ", updTopVer=" + updTopVer +
                ", invalidParts=" + invalidParts + ']';

            // Remap recursively.
            map(updTopVer);
        }
        else {
            fut.listen(new CI1<IgniteInternalFuture<GridCacheEntryInfo>>() {
                @Override public void apply(IgniteInternalFuture<GridCacheEntryInfo> fut) {
                    try {
                        GridCacheEntryInfo info = fut.get();

                        setResult(info);
                    }
                    catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);

                        onDone(e);
                    }
                }
            });
        }
    }
    else {
        synchronized (this) {
            assert this.node == null;

            this.topVer = topVer;
            this.node = node;
        }

        if (!trackable) {
            trackable = true;

            cctx.mvcc().addFuture(this, futId);
        }

        boolean needVer = this.needVer;

        final BackupPostProcessingClosure postClos = CU.createBackupPostProcessingClosure(topVer,
            log, cctx, key, expiryPlc, readThrough, skipVals);

        if (postClos != null) {
            // Need version to correctly store value.
            needVer = true;

            postProcessingClos = postClos;
        }

        GridCacheMessage req = new GridNearSingleGetRequest(cctx.cacheId(),
            futId.localId(),
            key,
            readThrough,
            topVer,
            subjId,
            taskName == null ? 0 : taskName.hashCode(),
            expiryPlc != null ? expiryPlc.forCreate() : -1L,
            expiryPlc != null ? expiryPlc.forAccess() : -1L,
            skipVals,
            /*add reader*/false,
            needVer,
            cctx.deploymentEnabled(),
            recovery);

        try {
            cctx.io().send(node, req, cctx.ioPolicy());
        }
        catch (IgniteCheckedException e) {
            if (e instanceof ClusterTopologyCheckedException)
                onNodeLeft(node.id());
            else
                onDone(e);
        }
    }
}
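This method makes the same topology-vs-other distinction as the previous one, but with an instanceof check inside a single catch clause instead of a dedicated handler. A compact sketch of that variant, again with hypothetical sendRequest, onNodeLeft and onDone helpers:

import java.util.UUID;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class SingleGetSendSketch {
    /** One catch clause; the topology case is discriminated by type. */
    void sendGet(UUID nodeId) {
        try {
            sendRequest(nodeId); // Hypothetical helper.
        }
        catch (IgniteCheckedException e) {
            if (e instanceof ClusterTopologyCheckedException)
                onNodeLeft(nodeId); // Remap the read to the new primary.
            else
                onDone(e);          // Complete the future exceptionally.
        }
    }

    void sendRequest(UUID nodeId) throws IgniteCheckedException { /* ... */ }

    void onNodeLeft(UUID nodeId) { /* ... */ }

    void onDone(IgniteCheckedException e) { /* ... */ }
}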
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridNearAtomicAbstractUpdateFuture, method primaryFailedResponse.
/**
* @param req Request.
* @return Response to notify about primary failure.
*/
final GridNearAtomicUpdateResponse primaryFailedResponse(GridNearAtomicAbstractUpdateRequest req) {
    assert req.response() == null : req;
    assert req.nodeId() != null : req;

    if (msgLog.isDebugEnabled()) {
        msgLog.debug("Near update fut, node left [futId=" + req.futureId() +
            ", node=" + req.nodeId() + ']');
    }

    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(cctx.cacheId(),
        req.nodeId(),
        req.futureId(),
        req.partition(),
        true,
        cctx.deploymentEnabled());

    ClusterTopologyCheckedException e = new ClusterTopologyCheckedException("Primary node left grid " +
        "before response is received: " + req.nodeId());

    e.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(req.topologyVersion()));

    res.addFailedKeys(req.keys(), e);

    return res;
}
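The key detail here is retryReadyFuture(...): the exception carries a future that completes when affinity for the next topology version is ready, so the caller can wait on it instead of busy-retrying. A minimal sketch of producing such an exception, assuming the caller supplies the affinity-ready future (the parameter type IgniteInternalFuture<?> is an assumption; the snippet above passes an internal affinity future):

import java.util.UUID;

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class PrimaryFailureSketch {
    /** Builds a topology exception that tells the caller when it is safe to retry. */
    ClusterTopologyCheckedException primaryLeft(UUID nodeId, IgniteInternalFuture<?> affReadyFut) {
        ClusterTopologyCheckedException e = new ClusterTopologyCheckedException(
            "Primary node left grid before response is received: " + nodeId);

        // Callers wait on this future before remapping, instead of retrying blindly.
        e.retryReadyFuture(affReadyFut);

        return e;
    }
}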
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method onAllReceived.
/**
 * @return Non-null topology version if the update should be remapped.
 */
@Nullable private AffinityTopologyVersion onAllReceived() {
    assert Thread.holdsLock(this);
    assert futureMapped() : this;

    AffinityTopologyVersion remapTopVer0 = null;

    if (remapKeys != null) {
        assert remapTopVer != null;

        remapTopVer0 = remapTopVer;
    }
    else {
        if (err != null &&
            X.hasCause(err, CachePartialUpdateCheckedException.class) &&
            X.hasCause(err, ClusterTopologyCheckedException.class) &&
            storeFuture() &&
            --remapCnt > 0) {
            ClusterTopologyCheckedException topErr = X.cause(err, ClusterTopologyCheckedException.class);

            if (!(topErr instanceof ClusterTopologyServerNotFoundException)) {
                CachePartialUpdateCheckedException cause = X.cause(err, CachePartialUpdateCheckedException.class);

                assert cause != null && cause.topologyVersion() != null : err;
                assert remapKeys == null;
                assert remapTopVer == null;

                remapTopVer = remapTopVer0 =
                    new AffinityTopologyVersion(cause.topologyVersion().topologyVersion() + 1);

                err = null;

                Collection<Object> failedKeys = cause.failedKeys();

                remapKeys = new ArrayList<>(failedKeys.size());

                for (Object key : failedKeys)
                    remapKeys.add(cctx.toCacheKeyObject(key));
            }
        }
    }

    if (remapTopVer0 != null) {
        cctx.mvcc().removeAtomicFuture(futId);

        topVer = AffinityTopologyVersion.ZERO;
        futId = 0;
        remapTopVer = null;
    }

    return remapTopVer0;
}
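The cause-unwrapping logic in the middle of this method can be isolated as a predicate: remap only when the failure chain contains both a partial-update error and a topology error, retries remain, and no server node is missing outright. A sketch of that decision under those assumptions (shouldRemap and the caller-maintained remapsLeft counter are illustrative, not Ignite API):

import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException;
import org.apache.ignite.internal.processors.cache.CachePartialUpdateCheckedException;
import org.apache.ignite.internal.util.typedef.X;

class RemapDecisionSketch {
    /** True if the failure is a plain topology change that remapping the update can fix. */
    boolean shouldRemap(Throwable err, int remapsLeft) {
        if (err == null || remapsLeft <= 0)
            return false;

        if (!X.hasCause(err, CachePartialUpdateCheckedException.class) ||
            !X.hasCause(err, ClusterTopologyCheckedException.class))
            return false;

        ClusterTopologyCheckedException topErr = X.cause(err, ClusterTopologyCheckedException.class);

        // If no server node holds the partition at all, remapping cannot help.
        return !(topErr instanceof ClusterTopologyServerNotFoundException);
    }
}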
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridDhtColocatedCache, method removeLocks.
/**
 * Removes locks, regardless of whether they are owned or not, for the given
 * version and keys.
 *
 * @param threadId Thread ID.
 * @param ver Lock version.
 * @param keys Keys.
 */
public void removeLocks(long threadId, GridCacheVersion ver, Collection<KeyCacheObject> keys) {
    if (keys.isEmpty())
        return;

    try {
        int keyCnt = -1;

        Map<ClusterNode, GridNearUnlockRequest> map = null;

        Collection<KeyCacheObject> locKeys = new LinkedList<>();

        for (KeyCacheObject key : keys) {
            IgniteTxKey txKey = ctx.txKey(key);

            GridCacheMvccCandidate lock = ctx.mvcc().removeExplicitLock(threadId, txKey, ver);

            if (lock != null) {
                AffinityTopologyVersion topVer = lock.topologyVersion();

                if (map == null) {
                    Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, topVer);

                    keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                    map = U.newHashMap(affNodes.size());
                }

                ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);

                if (primary == null) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to remove locks (all partition nodes left the grid).");

                    continue;
                }

                if (!primary.isLocal()) {
                    // Send request to remove from remote nodes.
                    GridNearUnlockRequest req = map.get(primary);

                    if (req == null) {
                        map.put(primary,
                            req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));

                        req.version(ver);
                    }

                    GridCacheEntryEx entry = peekEx(key);

                    KeyCacheObject key0 = entry != null ? entry.key() : key;

                    req.addKey(key0, ctx);
                }
                else
                    locKeys.add(key);
            }
        }

        if (!locKeys.isEmpty())
            removeLocks(ctx.localNodeId(), ver, locKeys, true);

        if (map == null || map.isEmpty())
            return;

        IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);

        Collection<GridCacheVersion> committed = versPair.get1();
        Collection<GridCacheVersion> rolledback = versPair.get2();

        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();

            GridDistributedUnlockRequest req = mapping.getValue();

            if (!F.isEmpty(req.keys())) {
                req.completedVersions(committed, rolledback);

                try {
                    // We don't wait for a reply to this message.
                    ctx.io().send(n, req, ctx.ioPolicy());
                }
                catch (ClusterTopologyCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to send unlock request (node has left the grid) [keys=" +
                            req.keys() + ", n=" + n + ", e=" + e + ']');
                }
                catch (IgniteCheckedException e) {
                    U.error(log, "Failed to send unlock request [keys=" + req.keys() + ", n=" + n + ']', e);
                }
            }
        }
    }
    catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
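Unlike the earlier snippets, this one deliberately swallows the topology exception: the unlock message is fire-and-forget, and a node that already left no longer needs it. A minimal sketch of that best-effort disposition, with hypothetical sendRequest and logError helpers:

import java.util.UUID;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class BestEffortUnlockSketch {
    /** Fire-and-forget send: a departed node needs no unlock message. */
    void sendUnlock(UUID nodeId) {
        try {
            sendRequest(nodeId); // Hypothetical one-way send; no reply expected.
        }
        catch (ClusterTopologyCheckedException ignored) {
            // Safe to drop: the target already left, so the message is moot.
        }
        catch (IgniteCheckedException e) {
            // Anything else is a real communication problem worth logging.
            logError(e);
        }
    }

    void sendRequest(UUID nodeId) throws IgniteCheckedException { /* ... */ }

    void logError(IgniteCheckedException e) { /* ... */ }
}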