Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridCachePartitionExchangeManager, method sendAllPartitions.
/**
 * @param nodes Target nodes.
 * @param msgTopVer Topology version. Will be added to full message.
 * @param grps Selected cache groups.
 */
private void sendAllPartitions(Collection<ClusterNode> nodes, AffinityTopologyVersion msgTopVer, Collection<CacheGroupContext> grps) {
    long time = System.currentTimeMillis();
    GridDhtPartitionsFullMessage m = createPartitionsFullMessage(true, false, null, null, null, null, grps);
    m.topologyVersion(msgTopVer);
    if (log.isInfoEnabled()) {
        long latency = System.currentTimeMillis() - time;
        if (latency > 50 || log.isDebugEnabled()) {
            log.info("Finished full message creation [msgTopVer=" + msgTopVer + ", groups=" + grps + ", latency=" + latency + "ms]");
        }
    }
    if (log.isTraceEnabled())
        log.trace("Sending all partitions [nodeIds=" + U.nodeIds(nodes) + ", cacheGroups=" + grps + ", msg=" + m + ']');
    time = System.currentTimeMillis();
    Collection<ClusterNode> failedNodes = U.newHashSet(nodes.size());
    for (ClusterNode node : nodes) {
        try {
            assert !node.equals(cctx.localNode());
            cctx.io().sendNoRetry(node, m, SYSTEM_POOL);
        } catch (ClusterTopologyCheckedException ignore) {
            if (log.isDebugEnabled()) {
                log.debug("Failed to send partition update to node because it left grid (will ignore) [node=" + node.id() + ", msg=" + m + ']');
            }
        } catch (IgniteCheckedException e) {
            failedNodes.add(node);
            U.warn(log, "Failed to send partitions full message [node=" + node + ", err=" + e + ']', e);
        }
    }
    if (log.isInfoEnabled()) {
        long latency = System.currentTimeMillis() - time;
        if (latency > 50 || log.isDebugEnabled()) {
            log.info("Finished sending full message [msgTopVer=" + msgTopVer + ", groups=" + grps + (failedNodes.isEmpty() ? "" : (", skipped=" + U.nodeIds(failedNodes))) + ", latency=" + latency + "ms]");
        }
    }
}
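Here ClusterTopologyCheckedException is the benign outcome: sendNoRetry() throws it when the target node has already left the grid, so the exchange manager only debug-logs it, while any other IgniteCheckedException marks the node as failed and is logged as a warning. Below is a minimal sketch of that fan-out pattern in isolation; the MessageSender interface and broadcast() helper are hypothetical stand-ins for cctx.io().sendNoRetry(...) and are not part of the Ignite sources.

// Hypothetical sketch: fan a message out to many nodes, ignoring nodes that left the grid.
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class FanOutSketch {
    /** Hypothetical sender abstraction standing in for cctx.io().sendNoRetry(...). */
    interface MessageSender {
        void send(ClusterNode node, Object msg) throws IgniteCheckedException;
    }

    /** Sends msg to every node and returns the nodes that failed for non-topology reasons. */
    static Collection<ClusterNode> broadcast(MessageSender sender, Collection<ClusterNode> nodes, Object msg) {
        List<ClusterNode> failedNodes = new ArrayList<>();
        for (ClusterNode node : nodes) {
            try {
                sender.send(node, msg);
            } catch (ClusterTopologyCheckedException ignore) {
                // The node left the grid between the topology snapshot and the send: benign, skip it.
            } catch (IgniteCheckedException e) {
                // Any other communication failure is worth reporting to the caller.
                failedNodes.add(node);
            }
        }
        return failedNodes;
    }
}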
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridCacheIoManager, method send.
/**
 * Sends communication message.
 *
 * @param nodeId ID of node to send the message to.
 * @param msg Message to send.
 * @param plc IO policy.
 * @throws IgniteCheckedException If sending failed.
 */
public void send(UUID nodeId, GridCacheMessage msg, byte plc) throws IgniteCheckedException {
    ClusterNode n = cctx.discovery().node(nodeId);
    if (n == null)
        throw new ClusterTopologyCheckedException("Failed to send message because node left grid [nodeId=" + nodeId + ", msg=" + msg + ']');
    send(n, msg, plc);
}
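This overload turns an absent entry in the discovery view into a ClusterTopologyCheckedException, so every caller gets a single, typed signal that the destination left the grid instead of a null check at each call site. A caller-side sketch follows, assuming a hypothetical CacheIo facade with the same contract (it is not the actual GridCacheIoManager API surface).

import java.util.UUID;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class SendSketch {
    /** Hypothetical facade standing in for GridCacheIoManager.send(UUID, GridCacheMessage, byte). */
    interface CacheIo {
        void send(UUID nodeId, Object msg, byte plc) throws IgniteCheckedException;
    }

    /** Returns false if the target node already left the grid; rethrows any other failure. */
    static boolean trySend(CacheIo io, UUID nodeId, Object msg, byte plc) throws IgniteCheckedException {
        try {
            io.send(nodeId, msg, plc);
            return true;
        } catch (ClusterTopologyCheckedException ignore) {
            // Expected and recoverable: the destination is gone, so the message is simply not delivered.
            return false;
        }
    }
}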
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridCacheAdapter, method syncOp.
/**
 * @param op Cache operation.
 * @param <T> Return type.
 * @return Operation result.
 * @throws IgniteCheckedException If operation failed.
 */
@SuppressWarnings({"ErrorNotRethrown", "AssignmentToCatchBlockParameter"})
@Nullable
private <T> T syncOp(SyncOp<T> op) throws IgniteCheckedException {
    checkJta();
    awaitLastFut();
    GridNearTxLocal tx = checkCurrentTx();
    if (tx == null || tx.implicit()) {
        TransactionConfiguration tCfg = CU.transactionConfiguration(ctx, ctx.kernalContext().config());
        CacheOperationContext opCtx = ctx.operationContextPerCall();
        int retries = opCtx != null && opCtx.noRetries() ? 1 : MAX_RETRIES;
        for (int i = 0; i < retries; i++) {
            tx = ctx.tm().newTx(true, op.single(), ctx.systemTx() ? ctx : null,
                ctx.mvccEnabled() ? PESSIMISTIC : OPTIMISTIC,
                ctx.mvccEnabled() ? REPEATABLE_READ : READ_COMMITTED,
                tCfg.getDefaultTxTimeout(), !ctx.skipStore(), ctx.mvccEnabled(), 0, null, false);
            assert tx != null;
            try {
                T t = op.op(tx);
                assert tx.done() : "Transaction is not done: " + tx;
                return t;
            } catch (IgniteInterruptedCheckedException | IgniteTxHeuristicCheckedException | NodeStoppingException | IgniteConsistencyViolationException e) {
                throw e;
            } catch (IgniteCheckedException e) {
                if (!(e instanceof IgniteTxRollbackCheckedException)) {
                    try {
                        tx.rollback();
                        if (!(e instanceof TransactionCheckedException))
                            e = new IgniteTxRollbackCheckedException("Transaction has been rolled back: " + tx.xid(), e);
                    } catch (IgniteCheckedException | AssertionError | RuntimeException e1) {
                        U.error(log, "Failed to rollback transaction (cache may contain stale locks): " + CU.txString(tx), e1);
                        if (e != e1)
                            e.addSuppressed(e1);
                    }
                }
                if (X.hasCause(e, ClusterTopologyCheckedException.class) && i != retries - 1) {
                    ClusterTopologyCheckedException topErr = e.getCause(ClusterTopologyCheckedException.class);
                    if (!(topErr instanceof ClusterTopologyServerNotFoundException)) {
                        AffinityTopologyVersion topVer = tx.topologyVersion();
                        assert topVer != null && topVer.topologyVersion() > 0 : tx;
                        AffinityTopologyVersion awaitVer = new AffinityTopologyVersion(topVer.topologyVersion() + 1, 0);
                        ctx.shared().exchange().affinityReadyFuture(awaitVer).get();
                        continue;
                    }
                }
                throw e;
            } catch (RuntimeException e) {
                try {
                    tx.rollback();
                } catch (IgniteCheckedException | AssertionError | RuntimeException e1) {
                    U.error(log, "Failed to rollback transaction " + CU.txString(tx), e1);
                }
                throw e;
            } finally {
                ctx.tm().resetContext();
                if (ctx.isNear())
                    ctx.near().dht().context().tm().resetContext();
            }
        }
        // Should not happen.
        throw new IgniteCheckedException("Failed to perform cache operation (maximum number of retries exceeded).");
    } else
        return op.op(tx);
}
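The part of syncOp() relevant to ClusterTopologyCheckedException is the retry idiom: if the failure was caused by a topology change, and it is not ClusterTopologyServerNotFoundException (i.e. server nodes still exist to serve the cache), the method waits for the next affinity topology version and reruns the whole operation. Below is a reduced sketch of that idiom; the TxOperation callback and TopologyWaiter are hypothetical stand-ins for the cache operation and the exchange manager's affinityReadyFuture(...), while the rollback handling of the real code is omitted.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException;
import org.apache.ignite.internal.util.typedef.X;

class RetrySketch {
    /** Hypothetical callback wrapping one attempt of the cache operation. */
    interface TxOperation<T> {
        T run() throws IgniteCheckedException;
    }

    /** Hypothetical hook standing in for exchange().affinityReadyFuture(awaitVer).get(). */
    interface TopologyWaiter {
        void awaitNextTopology() throws IgniteCheckedException;
    }

    static <T> T withRetries(TxOperation<T> op, TopologyWaiter waiter, int maxRetries) throws IgniteCheckedException {
        for (int i = 0; i < maxRetries; i++) {
            try {
                return op.run();
            } catch (IgniteCheckedException e) {
                if (X.hasCause(e, ClusterTopologyCheckedException.class) && i != maxRetries - 1) {
                    ClusterTopologyCheckedException topErr = e.getCause(ClusterTopologyCheckedException.class);
                    // Retry only while server nodes remain to serve the operation.
                    if (!(topErr instanceof ClusterTopologyServerNotFoundException)) {
                        waiter.awaitNextTopology();
                        continue;
                    }
                }
                throw e;
            }
        }
        throw new IgniteCheckedException("Failed to perform cache operation (maximum number of retries exceeded).");
    }
}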
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridDhtTxAbstractEnlistFuture, method sendBatch.
/**
 * Send batch request to remote data node.
 *
 * @param batch Batch.
 */
private void sendBatch(Batch batch) throws IgniteCheckedException {
    assert batch != null && !batch.node().isLocal();
    ClusterNode node = batch.node();
    updateMappings(node);
    GridDhtTxQueryEnlistRequest req;
    if (newRemoteTx(node))
        addNewRemoteTxNode(node);
    if (firstReqSent.add(node)) {
        // If this is the first request to this node, send full info.
        req = new GridDhtTxQueryFirstEnlistRequest(cctx.cacheId(), futId, tx.topologyVersionSnapshot(), lockVer,
            mvccSnapshot.withoutActiveTransactions(), tx.remainingTime(), tx.taskNameHash(), nearNodeId, nearLockVer,
            it.operation(), FIRST_BATCH_ID, batch.keys(), batch.values());
    } else {
        // Send only keys, values, lock version and batch ID if this is not the first request to this backup.
        req = new GridDhtTxQueryEnlistRequest(cctx.cacheId(), futId, lockVer, it.operation(), ++batchIdCntr,
            mvccSnapshot.operationCounter(), batch.keys(), batch.values());
    }
    ConcurrentMap<Integer, Batch> pending0 = null;
    if (pending == null)
        pending = new ConcurrentHashMap<>();
    else
        pending0 = pending.get(node.id());
    if (pending0 == null)
        pending.put(node.id(), pending0 = new ConcurrentHashMap<>());
    Batch prev = pending0.put(req.batchId(), batch);
    assert prev == null;
    try {
        cctx.io().send(node, req, cctx.ioPolicy());
    } catch (ClusterTopologyCheckedException e) {
        // Backup node left the grid, will continue.
        onNodeLeft(node.id());
    }
}
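Two details matter here: the batch is registered in the per-node pending map before the send so the response handler can find it, and a ClusterTopologyCheckedException from the send is not propagated but converted into onNodeLeft(), letting the enlist future deal with the departed backup. The sketch below shows equivalent bookkeeping using computeIfAbsent instead of the nested null checks; Batch, BatchIo and the onNodeLeft callback are hypothetical stand-ins for the future's internals.

import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Consumer;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class BatchSketch {
    /** Hypothetical batch of keys/values headed for one backup node. */
    interface Batch {
    }

    /** Hypothetical sender standing in for cctx.io().send(node, req, cctx.ioPolicy()). */
    interface BatchIo {
        void send(ClusterNode node, Object req) throws IgniteCheckedException;
    }

    private final ConcurrentMap<UUID, ConcurrentMap<Integer, Batch>> pending = new ConcurrentHashMap<>();

    void sendBatch(BatchIo io, ClusterNode node, int batchId, Object req, Batch batch,
        Consumer<UUID> onNodeLeft) throws IgniteCheckedException {
        // Register the batch before sending so a response (or node-left event) can locate it.
        Batch prev = pending.computeIfAbsent(node.id(), id -> new ConcurrentHashMap<>()).put(batchId, batch);
        assert prev == null;
        try {
            io.send(node, req);
        } catch (ClusterTopologyCheckedException e) {
            // The backup left the grid; hand cleanup or remapping over to the node-left handler.
            onNodeLeft.accept(node.id());
        }
    }
}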
Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
The class GridDhtCacheAdapter, method processForceKeysRequest0.
/**
 * @param node Node that originated the request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    try {
        ClusterNode loc = ctx.localNode();
        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(ctx.cacheId(), msg.futureId(), msg.miniId(), ctx.deploymentEnabled());
        GridDhtPartitionTopology top = ctx.topology();
        for (KeyCacheObject k : msg.keys()) {
            int p = ctx.affinity().partition(k);
            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);
            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);
                continue;
            }
            GridCacheEntryEx entry;
            while (true) {
                ctx.shared().database().checkpointReadLock();
                try {
                    entry = ctx.dht().entryEx(k);
                    entry.unswap();
                    if (ctx.mvccEnabled()) {
                        List<GridCacheEntryInfo> infos = entry.allVersionsInfo();
                        if (infos == null) {
                            assert entry.obsolete() : entry;
                            continue;
                        }
                        for (int i = 0; i < infos.size(); i++)
                            res.addInfo(infos.get(i));
                    } else {
                        GridCacheEntryInfo info = entry.info();
                        if (info == null) {
                            assert entry.obsolete() : entry;
                            continue;
                        }
                        if (!info.isNew())
                            res.addInfo(info);
                    }
                    entry.touch();
                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                } catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);
                    res.addMissed(k);
                    break;
                } finally {
                    ctx.shared().database().checkpointReadUnlock();
                }
            }
        }
        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');
        ctx.io().send(node, res, ctx.ioPolicy());
    } catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request from failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    }
}
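On the reply path, ClusterTopologyCheckedException means the requester itself failed or left before the response could be delivered; since a departed node cannot consume the response, the handler downgrades the error to a debug message, while any other IgniteCheckedException is reported as a real failure. A minimal reply-side sketch of that choice follows, assuming a hypothetical Responder interface rather than the actual ctx.io() API.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

class ReplySketch {
    /** Hypothetical sender standing in for ctx.io().send(node, res, ctx.ioPolicy()). */
    interface Responder {
        void send(ClusterNode node, Object res) throws IgniteCheckedException;
    }

    static void reply(Responder responder, ClusterNode node, Object res, IgniteLogger log) {
        try {
            responder.send(node, res);
        } catch (ClusterTopologyCheckedException ignore) {
            // The requester already left the grid; nobody is waiting for this response.
            if (log.isDebugEnabled())
                log.debug("Requester left the grid, response discarded [nodeId=" + node.id() + ']');
        } catch (IgniteCheckedException e) {
            log.error("Failed to reply to request [nodeId=" + node.id() + ']', e);
        }
    }
}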