Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project gridgain by gridgain.
In the class GridDhtAtomicCache, the method sendDeferredUpdateResponse.
/**
* @param primaryId Primary ID.
* @param msg Message.
*/
private void sendDeferredUpdateResponse(UUID primaryId, GridDhtAtomicDeferredUpdateResponse msg) {
    try {
        GridTimeoutObject timeoutSnd = msg.timeoutSender();

        if (timeoutSnd != null)
            ctx.time().removeTimeoutObject(timeoutSnd);

        ctx.io().send(primaryId, msg, ctx.ioPolicy());

        if (msgLog.isDebugEnabled()) {
            msgLog.debug("Sent deferred DHT update response [futIds=" + msg.futureIds() + ", node=" + primaryId + ']');
        }
    }
    catch (ClusterTopologyCheckedException ignored) {
        if (msgLog.isDebugEnabled()) {
            msgLog.debug("Failed to send deferred DHT update response, node left [futIds=" + msg.futureIds() + ", node=" + primaryId + ']');
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send deferred DHT update response to remote node [futIds=" + msg.futureIds() + ", node=" + primaryId + ']', e);
    }
}
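The method above treats ClusterTopologyCheckedException as a benign outcome: the primary node has already left, so the response is simply dropped and logged at debug level, while any other IgniteCheckedException is reported as an error. The same classification can also be applied after the fact by inspecting exception causes; the helper below is a minimal illustrative sketch (not part of GridGain, class and method names are invented) that relies only on X.hasCause and the exception types used in the snippet.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.internal.util.typedef.X;

/** Hypothetical helper: classifies a send failure as "target node left" vs. a genuine error. */
final class SendFailureClassifier {
    private SendFailureClassifier() {
        // No instances.
    }

    /**
     * @param e Failure thrown while sending a message.
     * @return {@code true} if the failure (or any of its causes) is a topology change,
     *      i.e. the destination node left the cluster and the message can be dropped.
     */
    static boolean isNodeLeftFailure(IgniteCheckedException e) {
        return X.hasCause(e, ClusterTopologyCheckedException.class);
    }
}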
In the class GridNearAtomicSingleUpdateFuture, the method waitAndRemap.
/**
* @param remapTopVer New topology version.
*/
private void waitAndRemap(AffinityTopologyVersion remapTopVer) {
    if (topLocked) {
        CachePartialUpdateCheckedException e = new CachePartialUpdateCheckedException("Failed to update keys (retry update if possible).");

        ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException("Failed to update keys, topology changed while executing atomic update inside transaction.");

        cause.retryReadyFuture(cctx.shared().exchange().affinityReadyFuture(remapTopVer));

        e.add(Collections.singleton(cctx.toCacheKeyObject(key)), cause);

        completeFuture(null, e, null);

        return;
    }

    IgniteInternalFuture<AffinityTopologyVersion> fut = cctx.shared().exchange().affinityReadyFuture(remapTopVer);

    if (fut == null)
        fut = new GridFinishedFuture<>(remapTopVer);

    fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
        @Override public void apply(final IgniteInternalFuture<AffinityTopologyVersion> fut) {
            cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                @Override public void run() {
                    try (TraceSurroundings ignored = MTC.support(cctx.kernalContext().tracing().create(CACHE_API_UPDATE_MAP, MTC.span()))) {
                        mapOnTopology();
                    }
                }
            });
        }
    });
}
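When the topology is locked, the method wraps the failure in a CachePartialUpdateCheckedException whose cause carries a retry-ready future, so the caller knows when the new affinity topology is ready. The sketch below shows the consuming side of that contract; it is illustrative only (class and method names are hypothetical) and uses only the retryReadyFuture() accessor of ClusterTopologyCheckedException and IgniteInternalFuture.get().

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;

/** Hypothetical consumer-side sketch: wait out the topology change signalled by the exception. */
final class RetryOnTopologyChange {
    private RetryOnTopologyChange() {
        // No instances.
    }

    /**
     * Blocks until the affinity topology referenced by {@code cause} is ready, after which the
     * caller may retry the atomic update that failed.
     *
     * @param cause Topology exception whose retry-ready future was set via {@code retryReadyFuture(fut)}.
     * @throws IgniteCheckedException If waiting for the new topology fails.
     */
    static void awaitRetryTopology(ClusterTopologyCheckedException cause) throws IgniteCheckedException {
        IgniteInternalFuture<?> readyFut = cause.retryReadyFuture();

        // The future is only present when the failure is retriable after a topology change;
        // otherwise there is nothing to wait for and the caller should rethrow.
        if (readyFut != null)
            readyFut.get();
    }
}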
In the class DataStreamerImpl, the method load0.
/**
* @param entries Entries.
* @param resFut Result future.
* @param activeKeys Active keys.
* @param remaps Remaps count.
* @param remapNode Node for remap. In case an update with {@code allowOverwrite() == false} fails on one node,
* we don't need to send the update request to all affinity nodes again if the topology version has not changed.
* @param remapTopVer Topology version.
*/
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps, ClusterNode remapNode, AffinityTopologyVersion remapTopVer) {
    try {
        assert entries != null;

        final boolean remap = remaps > 0;

        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }

        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance (to change, set allowOverwrite to true)");
                }

                isWarningPrinted = true;
            }
        }

        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();

        boolean initPda = ctx.deploy().enabled() && jobPda == null;

        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);

        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");

        GridCacheContext cctx = cache.context();

        GridCacheGateway gate = null;

        AffinityTopologyVersion topVer;

        if (!cctx.isLocal()) {
            GridDhtPartitionsExchangeFuture exchFut = ctx.cache().context().exchange().lastTopologyFuture();

            if (!exchFut.isDone()) {
                ExchangeActions acts = exchFut.exchangeActions();

                if (acts != null && acts.cacheStopped(CU.cacheId(cacheName)))
                    throw new CacheStoppedException(cacheName);
            }

            // It is safe to block here even if the cache gate is acquired.
            topVer = exchFut.get();
        }
        else
            topVer = ctx.cache().context().exchange().readyAffinityVersion();

        List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);

        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx required.
            gate = cctx.gate();

            gate.enter();
        }

        try {
            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;

                try {
                    KeyCacheObject key = entry.getKey();

                    assert key != null;

                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);

                        initPda = false;
                    }

                    if (key.partition() == -1)
                        key.partition(cctx.affinity().partition(key, false));

                    if (!allowOverwrite() && remapNode != null && F.eq(topVer, remapTopVer))
                        nodes = Collections.singletonList(remapNode);
                    else
                        nodes = nodes(key, topVer, cctx);
                }
                catch (IgniteCheckedException e) {
                    resFut.onDone(e);

                    return;
                }

                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node (no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));

                    return;
                }

                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);

                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());

                    col.add(entry);
                }
            }

            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final ClusterNode node = e.getKey();

                final UUID nodeId = e.getKey().id();

                Buffer buf = bufMappings.get(nodeId);

                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

                    if (old != null)
                        buf = old;
                }

                final Collection<DataStreamerEntry> entriesForNode = e.getValue();

                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
                    @Override public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();

                            if (activeKeys != null) {
                                for (DataStreamerEntry e : entriesForNode)
                                    activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));

                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            }
                            else {
                                assert entriesForNode.size() == 1;

                                // That has been a single key,
                                // so complete result future right away.
                                resFut.onDone();
                            }
                        }
                        catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            resFut.onDone(e1);
                        }
                        catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            if (cancelled) {
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
                            }
                            else if (remaps + 1 > maxRemapCnt) {
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
                            }
                            else if (X.hasCause(e1, IgniteClusterReadOnlyException.class)) {
                                resFut.onDone(new IgniteClusterReadOnlyException("Failed to finish operation. Cluster in read-only mode!", e1));
                            }
                            else {
                                try {
                                    remapSem.acquire();

                                    final Runnable r = new Runnable() {
                                        @Override public void run() {
                                            try {
                                                if (cancelled)
                                                    closedException();

                                                load0(entriesForNode, resFut, activeKeys, remaps + 1, node, topVer);
                                            }
                                            catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed.", ex));
                                            }
                                            finally {
                                                remapSem.release();
                                            }
                                        }
                                    };

                                    dataToRemap.add(r);

                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {
                                            @Override public Boolean call() {
                                                boolean locked = true;

                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;

                                                    try {
                                                        Runnable r = dataToRemap.poll();

                                                        if (r != null)
                                                            r.run();
                                                    }
                                                    finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);

                                                            locked = false;
                                                        }
                                                    }
                                                }

                                                return true;
                                            }
                                        }, true);
                                    }
                                }
                                catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };

                GridCompoundFuture opFut = new SilentCompoundFuture();

                opFut.listen(lsnr);

                final List<GridFutureAdapter<?>> futs;

                try {
                    futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);

                    opFut.markInitialized();
                }
                catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);

                    return;
                }

                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;

                        waitAffinityAndRun(new GridPlainRunnable() {
                            @Override public void run() {
                                buf0.onNodeLeft();

                                if (futs != null) {
                                    Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);

                                    for (int i = 0; i < futs.size(); i++)
                                        futs.get(i).onDone(ex);
                                }
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        }
        finally {
            if (gate != null)
                gate.leave();
        }
    }
    catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
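The IgniteCheckedException branch of the listener encodes a bounded-retry policy: a failed batch is failed outright if the streamer was cancelled or the remap budget (maxRemapCnt) is exhausted, surfaced as a read-only error if the cluster is in read-only mode, and otherwise queued for another mapping attempt. A compact restatement of that decision, with the read-only branch omitted for brevity, is sketched below; all names are hypothetical and this is not GridGain API.

/** Hypothetical restatement of the remap policy applied in the failure branch of load0(). */
enum RemapDecision {
    /** The streamer has been cancelled; fail the batch. */
    FAIL_CANCELLED,

    /** The batch has exhausted its remap budget; fail it. */
    FAIL_TOO_MANY_REMAPS,

    /** Queue the batch for another mapping attempt. */
    REMAP;

    /**
     * @param cancelled Whether the streamer has been cancelled.
     * @param remaps Number of remaps already performed for this batch.
     * @param maxRemapCnt Maximum allowed number of remaps.
     * @return Decision mirroring the branches of the listener above.
     */
    static RemapDecision decide(boolean cancelled, int remaps, int maxRemapCnt) {
        if (cancelled)
            return FAIL_CANCELLED;

        if (remaps + 1 > maxRemapCnt)
            return FAIL_TOO_MANY_REMAPS;

        return REMAP;
    }
}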
In the class GridContinuousProcessor, the method registerHandler.
/**
* @param nodeId Node ID.
* @param routineId Consumer ID.
* @param hnd Handler.
* @param bufSize Buffer size.
* @param interval Time interval.
* @param autoUnsubscribe Automatic unsubscribe flag.
* @param loc Local registration flag.
* @return Whether listener was actually registered.
* @throws IgniteCheckedException In case of error.
*/
private boolean registerHandler(final UUID nodeId, final UUID routineId, final GridContinuousHandler hnd, int bufSize, final long interval, boolean autoUnsubscribe, boolean loc) throws IgniteCheckedException {
    assert nodeId != null;
    assert routineId != null;
    assert hnd != null;
    assert bufSize > 0;
    assert interval >= 0;

    final RemoteRoutineInfo info = new RemoteRoutineInfo(nodeId, hnd, bufSize, interval, autoUnsubscribe);

    boolean doRegister = loc;

    if (!doRegister) {
        stopLock.lock();

        try {
            doRegister = rmtInfos.putIfAbsent(routineId, info) == null;
        }
        finally {
            stopLock.unlock();
        }
    }

    if (doRegister) {
        if (log.isDebugEnabled())
            log.debug("Register handler: [nodeId=" + nodeId + ", routineId=" + routineId + ", info=" + info + ']');

        if (interval > 0) {
            IgniteThread checker = new IgniteThread(new GridWorker(ctx.igniteInstanceName(), "continuous-buffer-checker", log) {
                @Override protected void body() {
                    long interval0 = interval;

                    while (!isCancelled()) {
                        try {
                            U.sleep(interval0);
                        }
                        catch (IgniteInterruptedCheckedException ignored) {
                            break;
                        }

                        IgniteBiTuple<GridContinuousBatch, Long> t = info.checkInterval();

                        final GridContinuousBatch batch = t.get1();

                        if (batch != null && batch.size() > 0) {
                            try {
                                Collection<Object> toSnd = batch.collect();

                                boolean msg = toSnd.iterator().next() instanceof Message;

                                CI1<IgniteException> ackC = new CI1<IgniteException>() {
                                    @Override public void apply(IgniteException e) {
                                        if (e == null)
                                            info.hnd.onBatchAcknowledged(routineId, batch, ctx);
                                    }
                                };

                                sendNotification(nodeId, routineId, null, toSnd, hnd.orderedTopic(), msg, ackC);
                            }
                            catch (ClusterTopologyCheckedException ignored) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to send notification to node (is node alive?): " + nodeId);
                            }
                            catch (IgniteCheckedException e) {
                                U.error(log, "Failed to send notification to node: " + nodeId, e);
                            }
                        }

                        interval0 = t.get2();
                    }
                }
            });

            checker.setUncaughtExceptionHandler(new OomExceptionHandler(ctx));

            bufCheckThreads.put(routineId, checker);

            checker.start();
        }

        GridContinuousHandler.RegisterStatus status = hnd.register(nodeId, routineId, ctx);

        if (status == GridContinuousHandler.RegisterStatus.DELAYED) {
            info.markDelayedRegister();

            return false;
        }
        else
            return status == GridContinuousHandler.RegisterStatus.REGISTERED;
    }

    return false;
}
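The continuous-buffer checker started above is a plain periodic-flush loop: sleep for the current interval, collect whatever the routine buffered, send it (treating ClusterTopologyCheckedException as "the peer is gone" and merely logging it at debug level), and let checkInterval() dictate the next sleep. A stripped-down, framework-free sketch of that loop shape follows; every name in it is hypothetical.

import java.util.Collection;
import java.util.concurrent.TimeUnit;

/** Hypothetical, framework-free restatement of the continuous-buffer checker loop. */
final class PeriodicBatchFlusher {
    /** Supplies the batch accumulated since the last check and the next sleep interval. */
    interface BatchSource {
        /** @return Objects collected since the last check; may be empty. */
        Collection<Object> collect();

        /** @return Interval, in milliseconds, to sleep before the next check. */
        long nextIntervalMillis();
    }

    /** Sends a batch; a failure caused by the peer leaving is expected and simply dropped. */
    interface BatchSender {
        void send(Collection<Object> batch) throws Exception;
    }

    private PeriodicBatchFlusher() {
        // No instances.
    }

    /** Runs until the current thread is interrupted, mirroring the worker's cancellation check. */
    static void run(BatchSource src, BatchSender snd, long initialIntervalMillis) {
        long interval = initialIntervalMillis;

        while (!Thread.currentThread().isInterrupted()) {
            try {
                TimeUnit.MILLISECONDS.sleep(interval);
            }
            catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();

                break;
            }

            Collection<Object> batch = src.collect();

            if (!batch.isEmpty()) {
                try {
                    snd.send(batch);
                }
                catch (Exception e) {
                    // In the worker above a ClusterTopologyCheckedException here means the peer
                    // node left and the batch is dropped; other failures are logged as errors.
                }
            }

            interval = src.nextIntervalMillis();
        }
    }
}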
In the class ClusterProcessor, the method initDiagnosticListeners.
/**
* @throws IgniteCheckedException If failed.
*/
public void initDiagnosticListeners() throws IgniteCheckedException {
    ctx.event().addLocalEventListener(evt -> {
        assert evt instanceof DiscoveryEvent;
        assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT;

        DiscoveryEvent discoEvt = (DiscoveryEvent) evt;

        UUID nodeId = discoEvt.eventNode().id();

        ConcurrentHashMap<Long, InternalDiagnosticFuture> futs = diagnosticFutMap.get();

        if (futs != null) {
            for (InternalDiagnosticFuture fut : futs.values()) {
                if (fut.nodeId.equals(nodeId))
                    fut.onDone(new IgniteDiagnosticInfo("Target node failed: " + nodeId));
            }
        }

        allNodesMetrics.remove(nodeId);
    }, EVT_NODE_FAILED, EVT_NODE_LEFT);

    ctx.io().addMessageListener(TOPIC_INTERNAL_DIAGNOSTIC, (nodeId, msg, plc) -> {
        if (msg instanceof IgniteDiagnosticMessage) {
            IgniteDiagnosticMessage msg0 = (IgniteDiagnosticMessage) msg;

            if (msg0.request()) {
                ClusterNode node = ctx.discovery().node(nodeId);

                if (node == null) {
                    if (diagnosticLog.isDebugEnabled()) {
                        diagnosticLog.debug("Skip diagnostic request, sender node left [node=" + nodeId + ", msg=" + msg + ']');
                    }

                    return;
                }

                byte[] diagRes;

                IgniteClosure<GridKernalContext, IgniteDiagnosticInfo> c;

                try {
                    c = msg0.unmarshal(marsh);

                    Objects.requireNonNull(c);

                    diagRes = marsh.marshal(c.apply(ctx));
                }
                catch (Exception e) {
                    U.error(diagnosticLog, "Failed to run diagnostic closure: " + e, e);

                    try {
                        IgniteDiagnosticInfo errInfo = new IgniteDiagnosticInfo("Failed to run diagnostic closure: " + e);

                        diagRes = marsh.marshal(errInfo);
                    }
                    catch (Exception e0) {
                        U.error(diagnosticLog, "Failed to marshal diagnostic closure result: " + e, e);

                        diagRes = null;
                    }
                }

                IgniteDiagnosticMessage res = IgniteDiagnosticMessage.createResponse(diagRes, msg0.futureId());

                try {
                    ctx.io().sendToGridTopic(node, TOPIC_INTERNAL_DIAGNOSTIC, res, GridIoPolicy.SYSTEM_POOL);
                }
                catch (ClusterTopologyCheckedException e) {
                    if (diagnosticLog.isDebugEnabled()) {
                        diagnosticLog.debug("Failed to send diagnostic response, node left [node=" + nodeId + ", msg=" + msg + ']');
                    }
                }
                catch (IgniteCheckedException e) {
                    U.error(diagnosticLog, "Failed to send diagnostic response [msg=" + msg0 + "]", e);
                }
            }
            else {
                InternalDiagnosticFuture fut = diagnosticFuturesMap().get(msg0.futureId());

                if (fut != null) {
                    IgniteDiagnosticInfo res;

                    try {
                        res = msg0.unmarshal(marsh);

                        if (res == null)
                            res = new IgniteDiagnosticInfo("Remote node failed to marshal response.");
                    }
                    catch (Exception e) {
                        U.error(diagnosticLog, "Failed to unmarshal diagnostic response: " + e, e);

                        res = new IgniteDiagnosticInfo("Failed to unmarshal diagnostic response: " + e);
                    }

                    fut.onResponse(res);
                }
                else
                    U.warn(diagnosticLog, "Failed to find diagnostic message future [msg=" + msg0 + ']');
            }
        }
        else
            U.warn(diagnosticLog, "Received unexpected message: " + msg);
    });

    if (sndMetrics) {
        ctx.io().addMessageListener(TOPIC_METRICS, (nodeId, msg, plc) -> {
            if (msg instanceof ClusterMetricsUpdateMessage)
                processMetricsUpdateMessage(nodeId, (ClusterMetricsUpdateMessage) msg);
            else
                U.warn(log, "Received unexpected message for TOPIC_METRICS: " + msg);
        });
    }
}
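On the request path above, the diagnostic result is marshalled with a two-level fallback: if the closure result cannot be marshalled, an error IgniteDiagnosticInfo is marshalled instead, and if even that fails the response carries a null payload. The helper below isolates just that fallback; it is an illustrative sketch (class and method names are invented) built on the public Marshaller.marshal contract.

import org.apache.ignite.marshaller.Marshaller;

/** Hypothetical helper isolating the marshalling fallback used when building a diagnostic response. */
final class DiagnosticPayloads {
    private DiagnosticPayloads() {
        // No instances.
    }

    /**
     * @param marsh Marshaller to use.
     * @param res Result produced by the diagnostic closure.
     * @param fallbackErrInfo Error object to marshal if {@code res} cannot be marshalled
     *      (in the snippet above this is an {@code IgniteDiagnosticInfo} describing the failure).
     * @return Marshalled result, marshalled fallback, or {@code null} if even the fallback fails.
     */
    static byte[] marshalOrNull(Marshaller marsh, Object res, Object fallbackErrInfo) {
        try {
            return marsh.marshal(res);
        }
        catch (Exception e) {
            try {
                return marsh.marshal(fallbackErrInfo);
            }
            catch (Exception e0) {
                // Nothing more can be done; the requester will see a response without a payload.
                return null;
            }
        }
    }
}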