Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
From the class TestRecordingCommunicationSpi, method sendMessage:
/** {@inheritDoc} */
@Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC)
    throws IgniteSpiException {
    // All Ignite code expects that 'send' fails after the discovery listener for a failed node has finished.
    if (getSpiContext().node(node.id()) == null) {
        throw new IgniteSpiException(new ClusterTopologyCheckedException(
            "Failed to send message (node left topology): " + node));
    }

    if (msg instanceof GridIoMessage) {
        GridIoMessage ioMsg = (GridIoMessage)msg;

        Message msg0 = ioMsg.message();

        synchronized (this) {
            boolean record = (recordClasses != null && recordClasses.contains(msg0.getClass())) ||
                (recordP != null && recordP.apply(node, msg0));

            if (record)
                recordedMsgs.add(msg0);

            boolean block = false;

            if (blockP != null && blockP.apply(node, msg0))
                block = true;
            else {
                Set<String> blockNodes = blockCls.get(msg0.getClass());

                if (blockNodes != null) {
                    String nodeName = (String)node.attributes().get(ATTR_IGNITE_INSTANCE_NAME);

                    block = blockNodes.contains(nodeName);
                }
            }

            if (block) {
                ignite.log().info("Block message [node=" + node.id() + ", msg=" + ioMsg.message() + ']');

                blockedMsgs.add(new T2<>(node, ioMsg));

                notifyAll();

                return;
            }
            else if (record)
                notifyAll();
        }
    }

    super.sendMessage(node, msg, ackC);
}
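For context, tests install this SPI on a node and then block or record specific message types; the blocked messages are the ones parked in blockedMsgs above. A minimal usage sketch, assuming this class's blockMessages/waitForBlocked/stopBlock helpers; the grid names, test harness, and blocked message type are illustrative:

// Illustrative test fragment: startGrid() stands in for a test harness that
// wires TestRecordingCommunicationSpi into the node's configuration.
IgniteEx node0 = startGrid("node0");
IgniteEx node1 = startGrid("node1");

TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(node0);

// Park every GridDhtTxPrepareRequest addressed to "node1" in blockedMsgs.
spi.blockMessages(GridDhtTxPrepareRequest.class, "node1");

// ... start the operation under test; its prepare message is now held by sendMessage() ...

spi.waitForBlocked(); // Waits on the monitor notified by notifyAll() above.
spi.stopBlock();      // Re-sends the parked messages and lets the operation finish.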
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
From the class GridCacheUtils, method retryTopologySafe:
/**
 * @param c Closure to retry.
 * @throws IgniteCheckedException If failed.
 * @return Closure result.
 */
public static <S> S retryTopologySafe(final Callable<S> c) throws IgniteCheckedException {
    for (int i = 0; i < GridCacheAdapter.MAX_RETRIES; i++) {
        try {
            return c.call();
        }
        catch (ClusterGroupEmptyCheckedException | ClusterTopologyServerNotFoundException e) {
            throw e;
        }
        catch (TransactionRollbackException e) {
            if (i + 1 == GridCacheAdapter.MAX_RETRIES)
                throw e;

            U.sleep(1);
        }
        catch (IgniteCheckedException e) {
            if (i + 1 == GridCacheAdapter.MAX_RETRIES)
                throw e;

            if (X.hasCause(e, ClusterTopologyCheckedException.class)) {
                ClusterTopologyCheckedException topErr = e.getCause(ClusterTopologyCheckedException.class);

                if (topErr instanceof ClusterGroupEmptyCheckedException ||
                    topErr instanceof ClusterTopologyServerNotFoundException)
                    throw e;

                // IGNITE-1948: remove this check when the issue is fixed.
                if (topErr.retryReadyFuture() != null)
                    topErr.retryReadyFuture().get();
                else
                    U.sleep(1);
            }
            else if (X.hasCause(e, IgniteTxRollbackCheckedException.class, CachePartialUpdateCheckedException.class))
                U.sleep(1);
            else
                throw e;
        }
        catch (RuntimeException e) {
            throw e;
        }
        catch (Exception e) {
            throw new IgniteCheckedException(e);
        }
    }

    // Should never happen: every iteration either returns or throws.
    throw new IgniteCheckedException("Failed to execute closure after " + GridCacheAdapter.MAX_RETRIES + " retries.");
}
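As a usage illustration, a caller wraps any topology-sensitive operation in a Callable and lets the helper absorb transient topology errors; the cache variable and key below are placeholders, not part of the snippet:

// Hypothetical caller: retry a read that may fail while the topology is changing.
// CU is Ignite's conventional alias for GridCacheUtils.
String val = CU.retryTopologySafe(new Callable<String>() {
    @Override public String call() throws Exception {
        // May throw an exception with a ClusterTopologyCheckedException cause during rebalancing.
        return cache.get("key");
    }
});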
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
From the class GridDhtTxPrepareFuture, method sendPrepareRequests:
/**
 * Sends prepare requests to the DHT and near mappings of the transaction.
 */
private void sendPrepareRequests() {
    if (tx.onePhaseCommit() && !tx.nearMap().isEmpty()) {
        for (GridDistributedTxMapping nearMapping : tx.nearMap().values()) {
            if (!tx.dhtMap().containsKey(nearMapping.primary().id())) {
                tx.onePhaseCommit(false);

                break;
            }
        }
    }

    int miniId = 0;

    assert tx.transactionNodes() != null;

    final long timeout = timeoutObj != null ? timeoutObj.timeout : 0;

    // Create mini futures.
    for (GridDistributedTxMapping dhtMapping : tx.dhtMap().values()) {
        assert !dhtMapping.empty();

        ClusterNode n = dhtMapping.primary();

        assert !n.isLocal();

        GridDistributedTxMapping nearMapping = tx.nearMap().get(n.id());

        Collection<IgniteTxEntry> nearWrites = nearMapping == null ? null : nearMapping.writes();

        Collection<IgniteTxEntry> dhtWrites = dhtMapping.writes();

        if (F.isEmpty(dhtWrites) && F.isEmpty(nearWrites))
            continue;

        if (tx.remainingTime() == -1)
            return;

        MiniFuture fut = new MiniFuture(n.id(), ++miniId, dhtMapping, nearMapping);

        // Append new future.
        add(fut);

        // The field 'req' (shadowed by the local 'req' declared below) is the originating prepare request.
        assert this.req.transactionNodes() != null;

        GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(
            futId,
            fut.futureId(),
            tx.topologyVersion(),
            tx,
            timeout,
            dhtWrites,
            nearWrites,
            this.req.transactionNodes(),
            tx.nearXidVersion(),
            true,
            tx.onePhaseCommit(),
            tx.subjectId(),
            tx.taskNameHash(),
            tx.activeCachesDeploymentEnabled(),
            tx.storeWriteThrough(),
            retVal);

        int idx = 0;

        for (IgniteTxEntry entry : dhtWrites) {
            try {
                GridDhtCacheEntry cached = (GridDhtCacheEntry)entry.cached();

                GridCacheContext<?, ?> cacheCtx = cached.context();

                // Do not invalidate near entry on originating transaction node.
                req.invalidateNearEntry(idx, !tx.nearNodeId().equals(n.id()) && cached.readerId(n.id()) != null);

                if (cached.isNewLocked()) {
                    List<ClusterNode> owners = cacheCtx.topology().owners(cached.partition(),
                        tx != null ? tx.topologyVersion() : cacheCtx.affinity().affinityTopologyVersion());

                    // Do not preload if local node is a partition owner.
                    if (!owners.contains(cctx.localNode()))
                        req.markKeyForPreload(idx);
                }
            }
            catch (GridCacheEntryRemovedException ignore) {
                assert false : "Got removed exception on entry with dht local candidate: " + entry;
            }

            idx++;
        }

        if (!F.isEmpty(nearWrites)) {
            for (IgniteTxEntry entry : nearWrites) {
                try {
                    if (entry.explicitVersion() == null) {
                        GridCacheMvccCandidate added = entry.cached().candidate(version());

                        assert added != null : "Missing candidate for cache entry: " + entry;
                        assert added.dhtLocal();

                        if (added.ownerVersion() != null)
                            req.owned(entry.txKey(), added.ownerVersion());
                    }
                }
                catch (GridCacheEntryRemovedException ignore) {
                    assert false : "Got removed exception on entry with dht local candidate: " + entry;
                }
            }
        }

        assert req.transactionNodes() != null;

        try {
            cctx.io().send(n, req, tx.ioPolicy());

            if (msgLog.isDebugEnabled()) {
                msgLog.debug("DHT prepare fut, sent request dht [txId=" + tx.nearXidVersion() +
                    ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
            }
        }
        catch (ClusterTopologyCheckedException ignored) {
            fut.onNodeLeft();
        }
        catch (IgniteCheckedException e) {
            if (!cctx.kernalContext().isStopping()) {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT prepare fut, failed to send request dht [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
                }

                fut.onResult(e);
            }
            else {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT prepare fut, failed to send request dht, ignore [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ", err=" + e + ']');
                }
            }
        }
    }

    for (GridDistributedTxMapping nearMapping : tx.nearMap().values()) {
        if (!tx.dhtMap().containsKey(nearMapping.primary().id())) {
            if (tx.remainingTime() == -1)
                return;

            MiniFuture fut = new MiniFuture(nearMapping.primary().id(), ++miniId, null, nearMapping);

            // Append new future.
            add(fut);

            GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(
                futId,
                fut.futureId(),
                tx.topologyVersion(),
                tx,
                timeout,
                null,
                nearMapping.writes(),
                tx.transactionNodes(),
                tx.nearXidVersion(),
                true,
                tx.onePhaseCommit(),
                tx.subjectId(),
                tx.taskNameHash(),
                tx.activeCachesDeploymentEnabled(),
                tx.storeWriteThrough(),
                retVal);

            for (IgniteTxEntry entry : nearMapping.entries()) {
                if (CU.writes().apply(entry)) {
                    try {
                        if (entry.explicitVersion() == null) {
                            GridCacheMvccCandidate added = entry.cached().candidate(version());

                            assert added != null : "Null candidate for non-group-lock entry " +
                                "[added=" + added + ", entry=" + entry + ']';
                            assert added.dhtLocal() : "Got non-dht-local candidate for prepare future " +
                                "[added=" + added + ", entry=" + entry + ']';

                            if (added != null && added.ownerVersion() != null)
                                req.owned(entry.txKey(), added.ownerVersion());
                        }
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        assert false : "Got removed exception on entry with dht local candidate: " + entry;
                    }
                }
            }

            assert req.transactionNodes() != null;

            try {
                cctx.io().send(nearMapping.primary(), req, tx.ioPolicy());

                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT prepare fut, sent request near [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
                }
            }
            catch (ClusterTopologyCheckedException ignored) {
                fut.onNodeLeft();
            }
            catch (IgniteCheckedException e) {
                if (!cctx.kernalContext().isStopping()) {
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("DHT prepare fut, failed to send request near [txId=" + tx.nearXidVersion() +
                            ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
                    }

                    fut.onResult(e);
                }
                else {
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("DHT prepare fut, failed to send request near, ignore [txId=" + tx.nearXidVersion() +
                            ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ", err=" + e + ']');
                    }
                }
            }
        }
    }
}
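The send-side pattern that appears twice above can be summarized as follows. This is a distilled sketch, not additional Ignite API; the helper name is invented for illustration:

// Sketch: route topology failures to onNodeLeft(), other failures to onResult().
private void sendPrepareRequest(ClusterNode n, GridDhtTxPrepareRequest req, MiniFuture fut) {
    try {
        cctx.io().send(n, req, tx.ioPolicy());
    }
    catch (ClusterTopologyCheckedException ignored) {
        // The target left between mapping and send: complete the mini future as "node left"
        // so the prepare future can finish instead of waiting for a response that will never come.
        fut.onNodeLeft();
    }
    catch (IgniteCheckedException e) {
        // Other send failures fail the mini future, unless the local node is stopping.
        if (!cctx.kernalContext().isStopping())
            fut.onResult(e);
    }
}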
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
From the class GridPartitionedGetFuture, method map:
/**
 * @param keys Keys.
 * @param mapped Mappings to check for duplicates.
 * @param topVer Topology version on which keys should be mapped.
 */
private void map(
    Collection<KeyCacheObject> keys,
    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped,
    final AffinityTopologyVersion topVer
) {
    Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);

    if (cacheNodes.isEmpty()) {
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
            "(all partition nodes left the grid) [topVer=" + topVer + ", cache=" + cctx.name() + ']'));

        return;
    }

    GridDhtTopologyFuture topFut = cctx.shared().exchange().lastFinishedFuture();

    Throwable err = topFut != null ? topFut.validateCache(cctx, recovery, true, null, keys) : null;

    if (err != null) {
        onDone(err);

        return;
    }

    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(cacheNodes.size());

    final int keysSize = keys.size();

    Map<K, V> locVals = U.newHashMap(keysSize);

    boolean hasRmtNodes = false;

    // Assign keys to primary nodes.
    for (KeyCacheObject key : keys)
        hasRmtNodes |= map(key, mappings, locVals, topVer, mapped);

    if (isDone())
        return;

    if (!locVals.isEmpty())
        add(new GridFinishedFuture<>(locVals));

    if (hasRmtNodes) {
        if (!trackable) {
            trackable = true;

            cctx.mvcc().addFuture(this, futId);
        }
    }

    // Create mini futures.
    for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
        final ClusterNode n = entry.getKey();

        final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();

        assert !mappedKeys.isEmpty();

        // If this is the primary or backup node for the keys.
        if (n.isLocal()) {
            final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = cache().getDhtAsync(n.id(),
                -1,
                mappedKeys,
                false,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc,
                skipVals,
                recovery);

            final Collection<Integer> invalidParts = fut.invalidPartitions();

            if (!F.isEmpty(invalidParts)) {
                Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);

                for (KeyCacheObject key : keys) {
                    if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
                        remapKeys.add(key);
                }

                AffinityTopologyVersion updTopVer = cctx.shared().exchange().readyAffinityVersion();

                assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology " +
                    "version did not change [topVer=" + topVer + ", updTopVer=" + updTopVer +
                    ", invalidParts=" + invalidParts + ']';

                // Remap recursively.
                map(remapKeys, mappings, updTopVer);
            }

            // Add new future.
            add(fut.chain(new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
                @Override public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                        return createResultMap(fut.get());
                    }
                    catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);

                        onDone(e);

                        return Collections.emptyMap();
                    }
                }
            }));
        }
        else {
            MiniFuture fut = new MiniFuture(n, mappedKeys, topVer,
                CU.createBackupPostProcessingClosure(topVer, log, cctx, null, expiryPlc, readThrough, skipVals));

            GridCacheMessage req = new GridNearGetRequest(
                cctx.cacheId(),
                futId,
                fut.futureId(),
                null,
                mappedKeys,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc != null ? expiryPlc.forCreate() : -1L,
                expiryPlc != null ? expiryPlc.forAccess() : -1L,
                false,
                skipVals,
                cctx.deploymentEnabled(),
                recovery);

            // Append new future.
            add(fut);

            try {
                cctx.io().send(n, req, cctx.ioPolicy());
            }
            catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft((ClusterTopologyCheckedException)e);
                else
                    fut.onResult(e);
            }
        }
    }
}
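From the public API side, the ClusterTopologyServerNotFoundException raised by map() eventually surfaces to user code. A hedged sketch of the caller's view; the cache name and key are illustrative, and the exact exception mapping may vary by Ignite version:

IgniteCache<Integer, String> cache = ignite.cache("partitioned"); // Illustrative cache name.

try {
    String val = cache.get(42);
}
catch (CacheServerNotFoundException e) {
    // All partition nodes for the key left the grid; retry once the topology stabilizes.
}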
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
From the class GridNearTxFinishFuture, method checkBackup:
/**
 * Verifies transaction state on the backup node when the primary cannot confirm the commit.
 */
private void checkBackup() {
    assert !hasFutures() : futures();

    GridDistributedTxMapping mapping = mappings.singleMapping();

    if (mapping != null) {
        UUID nodeId = mapping.primary().id();

        Collection<UUID> backups = tx.transactionNodes().get(nodeId);

        if (!F.isEmpty(backups)) {
            assert backups.size() == 1;

            UUID backupId = F.first(backups);

            ClusterNode backup = cctx.discovery().node(backupId);

            // Nothing to do if backup has left the grid.
            if (backup == null) {
                readyNearMappingFromBackup(mapping);

                ClusterTopologyCheckedException cause =
                    new ClusterTopologyCheckedException("Backup node left grid: " + backupId);

                cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

                onDone(new IgniteTxRollbackCheckedException("Failed to commit transaction " +
                    "(backup has left grid): " + tx.xidVersion(), cause));
            }
            else {
                final CheckBackupMiniFuture mini = new CheckBackupMiniFuture(1, backup, mapping);

                add(mini);

                if (backup.isLocal()) {
                    boolean committed = !cctx.tm().addRolledbackTx(tx);

                    readyNearMappingFromBackup(mapping);

                    if (committed) {
                        try {
                            if (tx.needReturnValue() && tx.implicit()) {
                                GridCacheReturnCompletableWrapper wrapper = cctx.tm().getCommittedTxReturn(tx.xidVersion());

                                assert wrapper != null : tx.xidVersion();

                                GridCacheReturn retVal = wrapper.fut().get();

                                assert retVal != null;

                                tx.implicitSingleResult(retVal);
                            }

                            if (tx.syncMode() == FULL_SYNC) {
                                GridCacheVersion nearXidVer = tx.nearXidVersion();

                                assert nearXidVer != null : tx;

                                IgniteInternalFuture<?> fut = cctx.tm().remoteTxFinishFuture(nearXidVer);

                                fut.listen(new CI1<IgniteInternalFuture<?>>() {
                                    @Override public void apply(IgniteInternalFuture<?> fut) {
                                        mini.onDone(tx);
                                    }
                                });

                                return;
                            }

                            mini.onDone(tx);
                        }
                        catch (IgniteCheckedException e) {
                            if (msgLog.isDebugEnabled()) {
                                msgLog.debug("Near finish fut, failed to finish [txId=" + tx.nearXidVersion() +
                                    ", node=" + backup.id() + ", err=" + e + ']');
                            }

                            mini.onDone(e);
                        }
                    }
                    else {
                        ClusterTopologyCheckedException cause =
                            new ClusterTopologyCheckedException("Primary node left grid: " + nodeId);

                        cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

                        mini.onDone(new IgniteTxRollbackCheckedException("Failed to commit transaction " +
                            "(transaction has been rolled back on backup node): " + tx.xidVersion(), cause));
                    }
                }
                else {
                    GridDhtTxFinishRequest finishReq = checkCommittedRequest(mini.futureId(), false);

                    try {
                        cctx.io().send(backup, finishReq, tx.ioPolicy());

                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("Near finish fut, sent check committed request [txId=" + tx.nearXidVersion() +
                                ", node=" + backup.id() + ']');
                        }
                    }
                    catch (ClusterTopologyCheckedException ignored) {
                        mini.onNodeLeft(backupId, false);
                    }
                    catch (IgniteCheckedException e) {
                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("Near finish fut, failed to send check committed request [txId=" +
                                tx.nearXidVersion() + ", node=" + backup.id() + ", err=" + e + ']');
                        }

                        mini.onDone(e);
                    }
                }
            }
        }
        else
            readyNearMappingFromBackup(mapping);
    }
}
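The retryReadyFuture() handshake seen here ties this snippet to retryTopologySafe above: the thrower attaches a future that completes once affinity is ready on the new topology, and a retrying caller blocks on that future instead of polling. A condensed sketch of both sides, reusing variable names from the snippets:

// Throwing side (as in checkBackup()):
ClusterTopologyCheckedException cause =
    new ClusterTopologyCheckedException("Backup node left grid: " + backupId);

cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

// Retrying side (as in retryTopologySafe()):
ClusterTopologyCheckedException topErr = e.getCause(ClusterTopologyCheckedException.class);

if (topErr != null && topErr.retryReadyFuture() != null)
    topErr.retryReadyFuture().get(); // Wait for the new topology instead of sleeping and polling.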