Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridIoManager, method send.
/**
* @param node Destination node.
* @param topic Topic to send the message to.
* @param topicOrd GridTopic enumeration ordinal.
* @param msg Message to send.
* @param plc Type of processing.
* @param ordered Ordered flag.
* @param timeout Timeout.
* @param skipOnTimeout Whether message can be skipped on timeout.
* @param ackC Ack closure.
* @param async If {@code true} message for local node will be processed in pool, otherwise in current thread.
* @throws IgniteCheckedException Thrown in case of any errors.
*/
private void send(
    ClusterNode node,
    Object topic,
    int topicOrd,
    Message msg,
    byte plc,
    boolean ordered,
    long timeout,
    boolean skipOnTimeout,
    IgniteInClosure<IgniteException> ackC,
    boolean async
) throws IgniteCheckedException {
    assert node != null;
    assert topic != null;
    assert msg != null;

    // Async execution was added only for IgniteMessaging.
    assert !async || msg instanceof GridIoUserMessage : msg;
    assert topicOrd >= 0 || !(topic instanceof GridTopic) : msg;

    try (TraceSurroundings ignored = support(null)) {
        MTC.span().addLog(() -> "Create communication msg - " + traceName(msg));

        GridIoMessage ioMsg = createGridIoMessage(topic, topicOrd, msg, plc, ordered, timeout, skipOnTimeout);

        if (locNodeId.equals(node.id())) {
            assert plc != P2P_POOL;

            CommunicationListener commLsnr = this.commLsnr;

            if (commLsnr == null)
                throw new IgniteCheckedException("Trying to send message when grid is not fully started.");

            if (ordered)
                processOrderedMessage(locNodeId, ioMsg, plc, null);
            else if (async)
                processRegularMessage(locNodeId, ioMsg, plc, NOOP);
            else
                processRegularMessage0(ioMsg, locNodeId);

            if (ackC != null)
                ackC.apply(null);
        }
        else {
            if (topicOrd < 0)
                ioMsg.topicBytes(U.marshal(marsh, topic));

            try {
                if ((CommunicationSpi<?>)getSpi() instanceof TcpCommunicationSpi)
                    getTcpCommunicationSpi().sendMessage(node, ioMsg, ackC);
                else
                    getSpi().sendMessage(node, ioMsg);
            }
            catch (IgniteSpiException e) {
                if (e.getCause() instanceof ClusterTopologyCheckedException)
                    throw (ClusterTopologyCheckedException)e.getCause();

                if (!ctx.discovery().alive(node))
                    throw new ClusterTopologyCheckedException("Failed to send message, node left: " + node.id(), e);

                throw new IgniteCheckedException("Failed to send message (node may have left the grid or " +
                    "TCP connection cannot be established due to firewall issues) " +
                    "[node=" + node + ", topic=" + topic + ", msg=" + msg + ", policy=" + plc + ']', e);
            }
        }
    }
}
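What makes this error handling useful downstream is that "node left" is surfaced as its own exception type, distinct from a generic communication failure. A minimal caller-side sketch of that split; TopologyAwareSender, sendToNode and remapAndRetry are hypothetical stand-ins, not Ignite API:

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

/** Hypothetical caller illustrating the node-left vs. generic-failure split above. */
abstract class TopologyAwareSender {
    /** Stand-in for GridIoManager.send(...); assumed to fail the same way. */
    abstract void sendToNode(Object node, Object msg) throws IgniteCheckedException;

    /** Stand-in for whatever remap-and-retry logic the caller owns. */
    abstract void remapAndRetry(Object msg);

    void sendWithTopologyHandling(Object node, Object msg) throws IgniteCheckedException {
        try {
            sendToNode(node, msg);
        }
        catch (ClusterTopologyCheckedException e) {
            // "Node left" arrives as a dedicated type, so it can be remapped and retried.
            remapAndRetry(msg);
        }
        // Any other IgniteCheckedException propagates to the caller as a hard failure.
    }
}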
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridIoManager, method openChannel.
/**
* @param nodeId Destination node to connect to.
* @param topic Topic to send the request to.
* @param initMsg Channel initialization message.
* @return Established {@link Channel} to use.
* @throws IgniteCheckedException If fails.
*/
private IgniteInternalFuture<Channel> openChannel(UUID nodeId, Object topic, Message initMsg) throws IgniteCheckedException {
    assert nodeId != null;
    assert topic != null;
    assert !locNodeId.equals(nodeId) : "Channel cannot be opened to the local node itself: " + nodeId;
    assert (CommunicationSpi)getSpi() instanceof TcpCommunicationSpi : "Only TcpCommunicationSpi supports direct " +
        "connections between nodes: " + getSpi().getClass();

    ClusterNode node = ctx.discovery().node(nodeId);

    if (node == null)
        throw new ClusterTopologyCheckedException("Failed to open a new channel to remote node (node left): " + nodeId);

    int topicOrd = topic instanceof GridTopic ? ((Enum<GridTopic>)topic).ordinal() : -1;

    GridIoMessage ioMsg = createGridIoMessage(topic, topicOrd, initMsg, PUBLIC_POOL, false, 0, false);

    try {
        if (topicOrd < 0)
            ioMsg.topicBytes(U.marshal(marsh, topic));

        return ((TcpCommunicationSpi)(CommunicationSpi)getSpi()).openChannel(node, ioMsg);
    }
    catch (IgniteSpiException e) {
        if (e.getCause() instanceof ClusterTopologyCheckedException)
            throw (ClusterTopologyCheckedException)e.getCause();

        if (!ctx.discovery().alive(node))
            throw new ClusterTopologyCheckedException("Failed to create channel (node left): " + node.id(), e);

        throw new IgniteCheckedException("Failed to create channel (node may have left the grid or " +
            "TCP connection cannot be established due to unknown issues) " +
            "[node=" + node + ", topic=" + topic + ']', e);
    }
}
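Both send and openChannel repeat the same three-step translation of an SPI failure: unwrap a ClusterTopologyCheckedException cause if present, then check discovery liveness, otherwise wrap generically. A hedged sketch of that pattern extracted into a standalone helper; SpiErrors and toCheckedException are hypothetical names, not part of Ignite:

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.spi.IgniteSpiException;

/** Hypothetical helper capturing the unwrap pattern shared by send and openChannel. */
final class SpiErrors {
    private SpiErrors() {}

    /**
     * Translates an SPI failure into ClusterTopologyCheckedException when that is the
     * root cause, or when the destination is known to be dead; else a generic error.
     */
    static IgniteCheckedException toCheckedException(IgniteSpiException e, boolean nodeAlive, String details) {
        if (e.getCause() instanceof ClusterTopologyCheckedException)
            return (ClusterTopologyCheckedException)e.getCause();

        if (!nodeAlive)
            return new ClusterTopologyCheckedException("Failed to communicate, node left: " + details, e);

        return new IgniteCheckedException("Failed to communicate with node: " + details, e);
    }
}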
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridDhtAtomicCache, method sendDeferredUpdateResponse.
/**
* @param primaryId Primary ID.
* @param msg Message.
*/
private void sendDeferredUpdateResponse(UUID primaryId, GridDhtAtomicDeferredUpdateResponse msg) {
    try {
        GridTimeoutObject timeoutSnd = msg.timeoutSender();

        if (timeoutSnd != null)
            ctx.time().removeTimeoutObject(timeoutSnd);

        ctx.io().send(primaryId, msg, ctx.ioPolicy());

        if (msgLog.isDebugEnabled()) {
            msgLog.debug("Sent deferred DHT update response [futIds=" + msg.futureIds() +
                ", node=" + primaryId + ']');
        }
    }
    catch (ClusterTopologyCheckedException ignored) {
        if (msgLog.isDebugEnabled()) {
            msgLog.debug("Failed to send deferred DHT update response, node left [futIds=" + msg.futureIds() +
                ", node=" + primaryId + ']');
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send deferred DHT update response to remote node [futIds=" + msg.futureIds() +
            ", node=" + primaryId + ']', e);
    }
}
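The deferred response is best-effort: a recipient that already left the grid makes the response moot, so the topology exception is only noted at debug level, while any other failure is logged as an error but still swallowed. A minimal sketch of that policy; BestEffortResponder, doSend and sendBestEffort are hypothetical names:

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

/** Hypothetical fire-and-forget sender mirroring sendDeferredUpdateResponse's policy. */
abstract class BestEffortResponder {
    abstract IgniteLogger log();

    /** Stand-in for the actual I/O send. */
    abstract void doSend(Object msg) throws IgniteCheckedException;

    void sendBestEffort(Object msg) {
        try {
            doSend(msg);
        }
        catch (ClusterTopologyCheckedException ignored) {
            // Recipient left: the response is pointless, so only note it at debug level.
            if (log().isDebugEnabled())
                log().debug("Skipped response, node left: " + msg);
        }
        catch (IgniteCheckedException e) {
            // Unexpected I/O failure: worth an error, but still not fatal for the sender.
            log().error("Failed to send response: " + msg, e);
        }
    }
}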
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridNearAtomicSingleUpdateFuture, method waitAndRemap.
/**
* @param remapTopVer New topology version.
*/
private void waitAndRemap(AffinityTopologyVersion remapTopVer) {
    if (topLocked) {
        CachePartialUpdateCheckedException e =
            new CachePartialUpdateCheckedException("Failed to update keys (retry update if possible).");

        ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException(
            "Failed to update keys, topology changed while executing atomic update inside transaction.");

        cause.retryReadyFuture(cctx.shared().exchange().affinityReadyFuture(remapTopVer));

        e.add(Collections.singleton(cctx.toCacheKeyObject(key)), cause);

        completeFuture(null, e, null);

        return;
    }

    IgniteInternalFuture<AffinityTopologyVersion> fut = cctx.shared().exchange().affinityReadyFuture(remapTopVer);

    if (fut == null)
        fut = new GridFinishedFuture<>(remapTopVer);

    fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
        @Override public void apply(final IgniteInternalFuture<AffinityTopologyVersion> fut) {
            cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                @Override public void run() {
                    mapOnTopology();
                }
            });
        }
    });
}
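The interesting part here is retryReadyFuture: the exception carries a future that completes once affinity for the new topology version has been calculated, so a caller can wait for it before retrying the failed update. A small consumer-side sketch assuming only the public retryReadyFuture() accessor; the class and method names are hypothetical:

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

/** Hypothetical consumer of the retry-ready future attached in waitAndRemap above. */
final class RetryAfterTopologyChange {
    private RetryAfterTopologyChange() {}

    /** Waits until the new topology is ready, after which a retry may proceed. */
    static void awaitRetryReady(ClusterTopologyCheckedException e) throws IgniteCheckedException {
        IgniteInternalFuture<?> readyFut = e.retryReadyFuture();

        if (readyFut != null)
            // Blocks until affinity for the new topology version is calculated.
            readyFut.get();
    }
}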
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridDhtTxFinishFuture, method finish.
/**
* @param commit Commit flag.
* @param dhtMap DHT map.
* @param nearMap Near map.
* @return {@code True} in case there is at least one synchronous {@code MiniFuture} to wait for.
*/
private boolean finish(boolean commit,
    Map<UUID, GridDistributedTxMapping> dhtMap,
    Map<UUID, GridDistributedTxMapping> nearMap
) {
    if (tx.onePhaseCommit())
        return false;

    assert !commit || !tx.txState().mvccEnabled() || tx.mvccSnapshot() != null || F.isEmpty(tx.writeEntries());

    boolean sync = tx.syncMode() == FULL_SYNC;

    if (tx.explicitLock() || tx.queryEnlisted())
        sync = true;

    boolean res = false;

    int miniId = 0;

    // No need to process active transactions on backups.
    MvccSnapshot mvccSnapshot = tx.mvccSnapshot();

    if (mvccSnapshot != null)
        mvccSnapshot = mvccSnapshot.withoutActiveTransactions();

    // Create mini futures.
    for (GridDistributedTxMapping dhtMapping : dhtMap.values()) {
        ClusterNode n = dhtMapping.primary();

        assert !n.isLocal();

        GridDistributedTxMapping nearMapping = nearMap.get(n.id());

        if (!dhtMapping.queryUpdate() && dhtMapping.empty() && nearMapping != null && nearMapping.empty())
            // Nothing to send.
            continue;

        MiniFuture fut = new MiniFuture(++miniId, dhtMapping, nearMapping);

        // Append new future.
        add(fut);

        GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
            tx.nearNodeId(), futId, fut.futureId(), tx.topologyVersion(),
            tx.xidVersion(), tx.commitVersion(), tx.threadId(), tx.isolation(),
            commit, tx.isInvalidate(), tx.system(), tx.ioPolicy(), tx.isSystemInvalidate(),
            sync || !commit ? FULL_SYNC : tx.syncMode(),
            tx.completedBase(), tx.committedVersions(), tx.rolledbackVersions(), tx.pendingVersions(),
            tx.size(), tx.taskNameHash(), tx.activeCachesDeploymentEnabled(),
            null, false, false, mvccSnapshot,
            commit ? null : cctx.tm().txHandler().filterUpdateCountersForBackupNode(tx, n));

        req.writeVersion(tx.writeVersion() != null ? tx.writeVersion() : tx.xidVersion());

        try {
            if (isNull(cctx.discovery().getAlive(n.id()))) {
                log.error("Unable to send message (node left topology): " + n);

                fut.onNodeLeft(new ClusterTopologyCheckedException("Node left grid while sending message to: " + n.id()));
            }
            else {
                cctx.io().send(n, req, tx.ioPolicy());

                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, sent request dht [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
                }

                if (sync || !commit)
                    // Force sync mode for rollback to prevent an issue with concurrent recovery.
                    res = true;
                else
                    fut.onDone();
            }
        }
        catch (IgniteCheckedException e) {
            // Fail the whole thing.
            if (e instanceof ClusterTopologyCheckedException)
                fut.onNodeLeft((ClusterTopologyCheckedException)e);
            else {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, failed to send request dht [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ", err=" + e + ']');
                }

                fut.onResult(e);
            }
        }
    }

    for (GridDistributedTxMapping nearMapping : nearMap.values()) {
        if (!dhtMap.containsKey(nearMapping.primary().id())) {
            if (nearMapping.empty())
                // Nothing to send.
                continue;

            MiniFuture fut = new MiniFuture(++miniId, null, nearMapping);

            // Append new future.
            add(fut);

            GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
                tx.nearNodeId(), futId, fut.futureId(), tx.topologyVersion(),
                tx.xidVersion(), tx.commitVersion(), tx.threadId(), tx.isolation(),
                commit, tx.isInvalidate(), tx.system(), tx.ioPolicy(), tx.isSystemInvalidate(),
                sync ? FULL_SYNC : tx.syncMode(),
                tx.completedBase(), tx.committedVersions(), tx.rolledbackVersions(), tx.pendingVersions(),
                tx.size(), tx.taskNameHash(), tx.activeCachesDeploymentEnabled(),
                false, false, mvccSnapshot, null);

            req.writeVersion(tx.writeVersion());

            try {
                cctx.io().send(nearMapping.primary(), req, tx.ioPolicy());

                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, sent request near [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
                }

                if (sync)
                    res = true;
                else
                    fut.onDone();
            }
            catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft((ClusterTopologyCheckedException)e);
                else {
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("DHT finish fut, failed to send request near [txId=" + tx.nearXidVersion() +
                            ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ", err=" + e + ']');
                    }

                    fut.onResult(e);
                }
            }
        }
    }

    return res;
}
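The send loop routes topology failures two ways: a proactive discovery check before sending, and an instanceof check on the caught exception afterwards; both end in onNodeLeft, while everything else fails only the affected mini-future via onResult. A condensed sketch of that routing, using separate catch clauses instead of the instanceof check; FinishSendPolicy and its abstract methods are hypothetical stand-ins:

import java.util.UUID;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

/** Hypothetical mini-future send loop showing finish()'s error routing. */
abstract class FinishSendPolicy {
    /** Stand-in for the discovery liveness check. */
    abstract boolean nodeAlive(UUID nodeId);

    /** Stand-in for cctx.io().send(...). */
    abstract void doSend(UUID nodeId, Object req) throws IgniteCheckedException;

    abstract void onNodeLeft(ClusterTopologyCheckedException e);

    abstract void onResult(IgniteCheckedException e);

    void sendFinishRequest(UUID nodeId, Object req) {
        // Proactive check: avoid a doomed send if discovery already knows the node is gone.
        if (!nodeAlive(nodeId)) {
            onNodeLeft(new ClusterTopologyCheckedException("Node left grid: " + nodeId));

            return;
        }

        try {
            doSend(nodeId, req);
        }
        catch (ClusterTopologyCheckedException e) {
            // The node left mid-send: handled like the proactive case.
            onNodeLeft(e);
        }
        catch (IgniteCheckedException e) {
            // Any other failure fails just this mini-future, not the whole finish.
            onResult(e);
        }
    }
}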