Use of org.apache.ignite.thread.OomExceptionHandler in project ignite by apache.
The class GridDiscoveryManager, method start().
/**
* {@inheritDoc}
*/
@Override
public void start() throws IgniteCheckedException {
ctx.addNodeAttribute(ATTR_OFFHEAP_SIZE, requiredOffheap());
ctx.addNodeAttribute(ATTR_DATA_REGIONS_OFFHEAP_SIZE, configuredOffheap());
DiscoverySpi spi = getSpi();
discoOrdered = discoOrdered();
histSupported = historySupported();
isLocDaemon = ctx.isDaemon();
hasRslvrs = !ctx.config().isClientMode() && !F.isEmpty(ctx.config().getSegmentationResolvers());
segChkFreq = ctx.config().getSegmentCheckFrequency();
if (hasRslvrs) {
if (segChkFreq < 0)
throw new IgniteCheckedException("Segment check frequency cannot be negative: " + segChkFreq);
if (segChkFreq > 0 && segChkFreq < 2000)
U.warn(log, "Configuration parameter 'segmentCheckFrequency' is too low " + "(at least 2000 ms recommended): " + segChkFreq);
int segResAttemp = ctx.config().getSegmentationResolveAttempts();
if (segResAttemp < 1)
throw new IgniteCheckedException("Segment resolve attempts cannot be negative or zero: " + segResAttemp);
checkSegmentOnStart();
}
spi.setMetricsProvider(createMetricsProvider());
if (ctx.security().enabled()) {
if (isSecurityCompatibilityMode())
ctx.addNodeAttribute(ATTR_SECURITY_COMPATIBILITY_MODE, true);
spi.setAuthenticator(new DiscoverySpiNodeAuthenticator() {
@Override
public SecurityContext authenticateNode(ClusterNode node, SecurityCredentials cred) {
try {
return ctx.security().authenticateNode(node, cred);
} catch (IgniteCheckedException e) {
throw U.convertException(e);
}
}
@Override
public boolean isGlobalNodeAuthentication() {
return ctx.security().isGlobalNodeAuthentication();
}
});
}
if (ctx.config().getCommunicationFailureResolver() != null)
ctx.resource().injectGeneric(ctx.config().getCommunicationFailureResolver());
// Shared reference between DiscoverySpiListener and DiscoverySpiDataExchange.
AtomicReference<IgniteFuture<?>> lastStateChangeEvtLsnrFutRef = new AtomicReference<>();
spi.setListener(new DiscoverySpiListener() {
private long gridStartTime;
private final Marshaller marshaller = MarshallerUtils.jdkMarshaller(ctx.igniteInstanceName());
/**
* {@inheritDoc}
*/
@Override
public void onLocalNodeInitialized(ClusterNode locNode) {
for (IgniteInClosure<ClusterNode> lsnr : locNodeInitLsnrs) lsnr.apply(locNode);
if (locNode instanceof IgniteClusterNode) {
final IgniteClusterNode node = (IgniteClusterNode) locNode;
if (consistentId != null)
node.setConsistentId(consistentId);
}
}
/**
* {@inheritDoc}
*/
@Override
public IgniteFuture<?> onDiscovery(DiscoveryNotification notification) {
GridFutureAdapter<?> notificationFut = new GridFutureAdapter<>();
discoNtfWrk.submit(notificationFut, ctx.security().enabled() ? new SecurityAwareNotificationTask(notification) : new NotificationTask(notification));
IgniteFuture<?> fut = new IgniteFutureImpl<>(notificationFut);
// TODO could be optimized with more specific conditions.
switch(notification.type()) {
case EVT_NODE_JOINED:
case EVT_NODE_LEFT:
case EVT_NODE_FAILED:
if (!CU.isPersistenceEnabled(ctx.config()))
lastStateChangeEvtLsnrFutRef.set(fut);
break;
case EVT_DISCOVERY_CUSTOM_EVT:
lastStateChangeEvtLsnrFutRef.set(fut);
}
return fut;
}
/**
* @param notification Notification.
*/
private void onDiscovery0(DiscoveryNotification notification) {
int type = notification.type();
ClusterNode node = notification.getNode();
long topVer = notification.getTopVer();
DiscoveryCustomMessage customMsg = notification.getCustomMsgData() == null ? null : ((CustomMessageWrapper) notification.getCustomMsgData()).delegate();
if (skipMessage(notification.type(), customMsg))
return;
final ClusterNode locNode = localNode();
if (notification.getTopHist() != null)
topHist = notification.getTopHist();
boolean verChanged;
if (type == EVT_NODE_METRICS_UPDATED)
verChanged = false;
else {
if (type != EVT_NODE_SEGMENTED && type != EVT_CLIENT_NODE_DISCONNECTED && type != EVT_CLIENT_NODE_RECONNECTED && type != EVT_DISCOVERY_CUSTOM_EVT) {
minorTopVer = 0;
verChanged = true;
} else
verChanged = false;
}
if (type == EVT_NODE_FAILED || type == EVT_NODE_LEFT) {
for (DiscoCache c : discoCacheHist.values()) c.updateAlives(node);
updateClientNodes(node.id());
}
boolean locJoinEvt = type == EVT_NODE_JOINED && node.id().equals(locNode.id());
ChangeGlobalStateFinishMessage stateFinishMsg = null;
if (type == EVT_NODE_FAILED || type == EVT_NODE_LEFT)
stateFinishMsg = ctx.state().onNodeLeft(node);
final AffinityTopologyVersion nextTopVer;
if (type == EVT_DISCOVERY_CUSTOM_EVT) {
assert customMsg != null;
boolean incMinorTopVer;
if (customMsg instanceof ChangeGlobalStateMessage) {
incMinorTopVer = ctx.state().onStateChangeMessage(new AffinityTopologyVersion(topVer, minorTopVer), (ChangeGlobalStateMessage) customMsg, discoCache());
} else if (customMsg instanceof ChangeGlobalStateFinishMessage) {
ctx.state().onStateFinishMessage((ChangeGlobalStateFinishMessage) customMsg);
Snapshot snapshot = topSnap.get();
// Topology version does not change, but we need to create a DiscoCache with the new state.
DiscoCache discoCache = snapshot.discoCache.copy(snapshot.topVer, ctx.state().clusterState());
topSnap.set(new Snapshot(snapshot.topVer, discoCache));
incMinorTopVer = false;
} else {
incMinorTopVer = ctx.cache().onCustomEvent(customMsg, new AffinityTopologyVersion(topVer, minorTopVer), node);
}
if (incMinorTopVer) {
minorTopVer++;
verChanged = true;
}
nextTopVer = new AffinityTopologyVersion(topVer, minorTopVer);
if (incMinorTopVer)
ctx.cache().onDiscoveryEvent(type, customMsg, node, nextTopVer, ctx.state().clusterState());
} else {
nextTopVer = new AffinityTopologyVersion(topVer, minorTopVer);
ctx.cache().onDiscoveryEvent(type, customMsg, node, nextTopVer, ctx.state().clusterState());
}
DiscoCache discoCache;
// There is no race between discovery history maintenance and concurrent discovery event notifications, since the SPI notifies the manager about all events from this listener.
if (verChanged) {
Snapshot snapshot = topSnap.get();
if (customMsg == null) {
discoCache = createDiscoCache(nextTopVer, ctx.state().clusterState(), locNode, notification.getTopSnapshot());
} else if (customMsg instanceof ChangeGlobalStateMessage) {
discoCache = createDiscoCache(nextTopVer, ctx.state().pendingState((ChangeGlobalStateMessage) customMsg), locNode, notification.getTopSnapshot());
} else
discoCache = customMsg.createDiscoCache(GridDiscoveryManager.this, nextTopVer, snapshot.discoCache);
discoCacheHist.put(nextTopVer, discoCache);
assert snapshot.topVer.compareTo(nextTopVer) < 0 : "Topology version out of order [this.topVer=" + topSnap + ", topVer=" + topVer + ", node=" + node + ", nextTopVer=" + nextTopVer + ", evt=" + U.gridEventName(type) + ']';
topSnap.set(new Snapshot(nextTopVer, discoCache));
} else
// Current version.
discoCache = discoCache();
if (locJoinEvt || !node.isClient() && !node.isDaemon()) {
if (type == EVT_NODE_LEFT || type == EVT_NODE_FAILED || type == EVT_NODE_JOINED) {
boolean discoCacheRecalculationRequired = ctx.state().autoAdjustInMemoryClusterState(node.id(), notification.getTopSnapshot(), discoCache, topVer, minorTopVer);
if (discoCacheRecalculationRequired) {
discoCache = createDiscoCache(nextTopVer, ctx.state().clusterState(), locNode, notification.getTopSnapshot());
discoCacheHist.put(nextTopVer, discoCache);
topSnap.set(new Snapshot(nextTopVer, discoCache));
}
}
}
if (type == EVT_DISCOVERY_CUSTOM_EVT) {
for (Class cls = customMsg.getClass(); cls != null; cls = cls.getSuperclass()) {
List<CustomEventListener<DiscoveryCustomMessage>> list = customEvtLsnrs.get(cls);
if (list != null) {
for (CustomEventListener<DiscoveryCustomMessage> lsnr : list) {
try {
lsnr.onCustomEvent(nextTopVer, node, customMsg);
} catch (Exception e) {
U.error(log, "Failed to notify direct custom event listener: " + customMsg, e);
}
}
}
}
}
SecurityContext secCtx = remoteSecurityContext(ctx);
// If this is a local join event, just save it and do not notify listeners.
if (locJoinEvt) {
if (gridStartTime == 0)
gridStartTime = getSpi().getGridStartTime();
topSnap.set(new Snapshot(nextTopVer, discoCache));
startLatch.countDown();
DiscoveryEvent discoEvt = new DiscoveryEvent();
discoEvt.node(ctx.discovery().localNode());
discoEvt.eventNode(node);
discoEvt.type(EVT_NODE_JOINED);
discoEvt.topologySnapshot(topVer, new ArrayList<>(F.view(notification.getTopSnapshot(), FILTER_NOT_DAEMON)));
if (notification.getSpanContainer() != null)
discoEvt.span(notification.getSpanContainer().span());
discoWrk.discoCache = discoCache;
if (!ctx.clientDisconnected()) {
// The security processor must be notified first, since {@link IgniteSecurity#onLocalJoin}
// finishes local node security context initialization that can be demanded by other Ignite
// components.
ctx.security().onLocalJoin();
if (!isLocDaemon) {
ctx.cache().context().versions().onLocalJoin(topVer);
ctx.cache().context().coordinators().onLocalJoin(discoEvt, discoCache);
ctx.cache().context().exchange().onLocalJoin(discoEvt, discoCache);
ctx.service().onLocalJoin(discoEvt, discoCache);
ctx.encryption().onLocalJoin();
ctx.cluster().onLocalJoin();
}
}
IgniteInternalFuture<Boolean> transitionWaitFut = ctx.state().onLocalJoin(discoCache);
locJoin.onDone(new DiscoveryLocalJoinData(discoEvt, discoCache, transitionWaitFut, ctx.state().clusterState().active()));
return;
} else if (type == EVT_CLIENT_NODE_DISCONNECTED) {
assert locNode.isClient() : locNode;
assert node.isClient() : node;
((IgniteKernal) ctx.grid()).onDisconnected();
if (!locJoin.isDone())
locJoin.onDone(new IgniteCheckedException("Node disconnected"));
locJoin = new GridFutureAdapter<>();
registeredCaches.clear();
registeredCacheGrps.clear();
for (AffinityTopologyVersion histVer : discoCacheHist.keySet()) {
Object rmvd = discoCacheHist.remove(histVer);
assert rmvd != null : histVer;
}
topHist.clear();
topSnap.set(new Snapshot(AffinityTopologyVersion.ZERO, createDiscoCache(AffinityTopologyVersion.ZERO, ctx.state().clusterState(), locNode, Collections.singleton(locNode))));
} else if (type == EVT_CLIENT_NODE_RECONNECTED) {
assert locNode.isClient() : locNode;
assert node.isClient() : node;
ctx.security().onLocalJoin();
boolean clusterRestarted = gridStartTime != getSpi().getGridStartTime();
gridStartTime = getSpi().getGridStartTime();
((IgniteKernal) ctx.grid()).onReconnected(clusterRestarted);
ctx.cache().context().coordinators().onLocalJoin(localJoinEvent(), discoCache);
ctx.cache().context().exchange().onLocalJoin(localJoinEvent(), discoCache);
ctx.service().onLocalJoin(localJoinEvent(), discoCache);
DiscoCache discoCache0 = discoCache;
ctx.cluster().clientReconnectFuture().listen(new CI1<IgniteFuture<?>>() {
@Override
public void apply(IgniteFuture<?> fut) {
try {
fut.get();
discoWrk.addEvent(new NotificationEvent(EVT_CLIENT_NODE_RECONNECTED, nextTopVer, node, discoCache0, notification.getTopSnapshot(), null, notification.getSpanContainer(), secCtx));
} catch (IgniteException ignore) {
// No-op.
}
}
});
return;
}
if (type == EVT_CLIENT_NODE_DISCONNECTED || type == EVT_NODE_SEGMENTED || !ctx.clientDisconnected())
discoWrk.addEvent(new NotificationEvent(type, nextTopVer, node, discoCache, notification.getTopSnapshot(), customMsg, notification.getSpanContainer(), secCtx));
if (stateFinishMsg != null)
discoWrk.addEvent(new NotificationEvent(EVT_DISCOVERY_CUSTOM_EVT, nextTopVer, node, discoCache, notification.getTopSnapshot(), stateFinishMsg, notification.getSpanContainer(), secCtx));
if (type == EVT_CLIENT_NODE_DISCONNECTED)
discoWrk.awaitDisconnectEvent();
}
/**
* Extends {@link NotificationTask} to run in a security context owned by the initiator of the
* discovery event.
*/
class SecurityAwareNotificationTask extends NotificationTask {
/**
*/
public SecurityAwareNotificationTask(DiscoveryNotification notification) {
super(notification);
}
/**
*/
@Override
public void run() {
DiscoverySpiCustomMessage customMsg = notification.getCustomMsgData();
if (customMsg instanceof SecurityAwareCustomMessageWrapper) {
UUID secSubjId = ((SecurityAwareCustomMessageWrapper) customMsg).securitySubjectId();
try (OperationSecurityContext ignored = ctx.security().withContext(secSubjId)) {
super.run();
}
} else {
SecurityContext initiatorNodeSecCtx = nodeSecurityContext(marshaller, U.resolveClassLoader(ctx.config()), notification.getNode());
try (OperationSecurityContext ignored = ctx.security().withContext(initiatorNodeSecCtx)) {
super.run();
}
}
}
}
/**
* Represents task to handle discovery notification asynchronously.
*/
class NotificationTask implements Runnable {
/**
*/
protected final DiscoveryNotification notification;
/**
*/
public NotificationTask(DiscoveryNotification notification) {
this.notification = notification;
}
/**
* {@inheritDoc}
*/
@Override
public void run() {
synchronized (discoEvtMux) {
onDiscovery0(notification);
}
}
}
});
spi.setDataExchange(new DiscoverySpiDataExchange() {
@Override
public DiscoveryDataBag collect(DiscoveryDataBag dataBag) {
assert dataBag != null;
assert dataBag.joiningNodeId() != null;
if (ctx.localNodeId().equals(dataBag.joiningNodeId())) {
for (GridComponent c : ctx.components()) c.collectJoiningNodeData(dataBag);
} else {
waitForLastStateChangeEventFuture();
for (GridComponent c : ctx.components()) c.collectGridNodeData(dataBag);
}
return dataBag;
}
@Override
public void onExchange(DiscoveryDataBag dataBag) {
assert dataBag != null;
assert dataBag.joiningNodeId() != null;
if (ctx.localNodeId().equals(dataBag.joiningNodeId())) {
// NodeAdded msg reached joining node after round-trip over the ring.
IGridClusterStateProcessor stateProc = ctx.state();
stateProc.onGridDataReceived(dataBag.gridDiscoveryData(stateProc.discoveryDataType().ordinal()));
for (GridComponent c : ctx.components()) {
if (c.discoveryDataType() != null && c != stateProc)
c.onGridDataReceived(dataBag.gridDiscoveryData(c.discoveryDataType().ordinal()));
}
} else {
// Discovery data from the newly joined node has to be applied on this (already running) node.
IGridClusterStateProcessor stateProc = ctx.state();
JoiningNodeDiscoveryData data0 = dataBag.newJoinerDiscoveryData(stateProc.discoveryDataType().ordinal());
assert data0 != null;
stateProc.onJoiningNodeDataReceived(data0);
for (GridComponent c : ctx.components()) {
if (c.discoveryDataType() != null && c != stateProc) {
JoiningNodeDiscoveryData data = dataBag.newJoinerDiscoveryData(c.discoveryDataType().ordinal());
if (data != null)
c.onJoiningNodeDataReceived(data);
}
}
}
}
/**
*/
private void waitForLastStateChangeEventFuture() {
IgniteFuture<?> lastStateChangeEvtLsnrFut = lastStateChangeEvtLsnrFutRef.get();
if (lastStateChangeEvtLsnrFut != null) {
Thread currThread = Thread.currentThread();
GridWorker worker = currThread instanceof IgniteDiscoveryThread ? ((IgniteDiscoveryThread) currThread).worker() : null;
if (worker != null)
worker.blockingSectionBegin();
try {
lastStateChangeEvtLsnrFut.get();
} finally {
// Guaranteed to be invoked in the same thread as DiscoverySpiListener#onDiscovery.
// No additional synchronization for reference is required.
lastStateChangeEvtLsnrFutRef.set(null);
if (worker != null)
worker.blockingSectionEnd();
}
}
}
});
new DiscoveryMessageNotifierThread(discoNtfWrk).start();
startSpi();
registeredDiscoSpi = true;
try {
U.await(startLatch);
} catch (IgniteInterruptedException e) {
throw new IgniteCheckedException("Failed to start discovery manager (thread has been interrupted).", e);
}
// Start segment check worker only if frequency is greater than 0.
if (hasRslvrs && segChkFreq > 0) {
segChkWrk = new SegmentCheckWorker();
segChkThread = new IgniteThread(segChkWrk);
segChkThread.setUncaughtExceptionHandler(new OomExceptionHandler(ctx));
segChkThread.start();
}
locNode = spi.getLocalNode();
checkAttributes(discoCache().remoteNodes());
// Start discovery worker.
new IgniteThread(discoWrk).start();
if (log.isDebugEnabled())
log.debug(startInfo());
}
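Every usage on this page follows the same pattern: a GridWorker is wrapped in an IgniteThread, and OomExceptionHandler is installed as the uncaught exception handler so that an OutOfMemoryError thrown in the worker thread is handled centrally (typically by the node failure handling) instead of silently killing the thread. Below is a minimal sketch of that pattern, assuming a GridKernalContext ctx and an IgniteLogger log are in scope; the worker name and body are placeholders and not taken from the Ignite sources.
GridWorker worker = new GridWorker(ctx.igniteInstanceName(), "sample-worker", log) {
    @Override
    protected void body() throws InterruptedException {
        // Placeholder work loop; a real worker would do its periodic work here.
        while (!isCancelled())
            Thread.sleep(1000);
    }
};

IgniteThread thread = new IgniteThread(worker);

// Route an OutOfMemoryError from the worker thread to the OOM handler.
thread.setUncaughtExceptionHandler(new OomExceptionHandler(ctx));
thread.start();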
Use of org.apache.ignite.thread.OomExceptionHandler in project ignite by apache.
The class DataStreamProcessor, method start().
/**
* {@inheritDoc}
*/
@Override
public void start() throws IgniteCheckedException {
if (ctx.config().isDaemon())
return;
marshErrBytes = U.marshal(marsh, new IgniteCheckedException("Failed to marshal response error, " + "see node log for details."));
flusher = new IgniteThread(new GridWorker(ctx.igniteInstanceName(), "grid-data-loader-flusher", log) {
@Override
protected void body() throws InterruptedException {
while (!isCancelled()) {
DataStreamerImpl<K, V> ldr = flushQ.take();
if (!busyLock.enterBusy())
return;
try {
if (ldr.isClosed())
continue;
ldr.tryFlush();
flushQ.offer(ldr);
} finally {
busyLock.leaveBusy();
}
}
}
});
flusher.setUncaughtExceptionHandler(new OomExceptionHandler(ctx));
flusher.start();
if (log.isDebugEnabled())
log.debug("Started data streamer processor.");
}
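A plausible shutdown counterpart for the flusher thread above, shown only as an illustration of the usual IgniteThread lifecycle handling and not as the verbatim DataStreamProcessor code, could interrupt the worker and wait for it to exit:
// Hypothetical stop logic: interrupting the flusher makes flushQ.take() throw
// InterruptedException, which terminates the worker body.
if (flusher != null) {
    U.interrupt(flusher);
    U.join(flusher, log);
}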
Use of org.apache.ignite.thread.OomExceptionHandler in project ignite by apache.
The class WalStateManager, method onProposeExchange().
/**
* Handles a propose message, which is synchronized with other cache state actions through the exchange thread.
* If the operation is a no-op (i.e. the state is not changed), no additional processing is needed and the
* coordinator triggers the finish request right away. Otherwise all nodes start an asynchronous checkpoint flush
* and send responses to the coordinator. Once all responses are received, the coordinator node triggers the finish message.
*
* @param msg Message.
*/
public void onProposeExchange(WalStateProposeMessage msg) {
if (!srv)
return;
synchronized (mux) {
WalStateResult res = null;
if (msg.affinityNode()) {
// Affinity node, normal processing.
CacheGroupContext grpCtx = cacheProcessor().cacheGroup(msg.groupId());
if (grpCtx == null) {
// Related caches were destroyed concurrently.
res = new WalStateResult(msg, "Failed to change WAL mode because some caches " + "no longer exist: " + msg.caches().keySet());
} else {
if (F.eq(msg.enable(), grpCtx.globalWalEnabled()))
// Nothing changed -> no-op.
res = new WalStateResult(msg, false);
else {
// Initiate a checkpoint.
CheckpointProgress cpFut = triggerCheckpoint("wal-state-change-grp-" + msg.groupId());
if (cpFut != null) {
try {
// Wait for the checkpoint mark synchronously before releasing control.
cpFut.futureFor(LOCK_RELEASED).get();
if (msg.enable()) {
grpCtx.globalWalEnabled(true);
// Enable: it is enough to release cache operations once mark is finished because
// not-yet-flushed dirty pages have been logged.
WalStateChangeWorker worker = new WalStateChangeWorker(msg, cpFut);
IgniteThread thread = new IgniteThread(worker);
thread.setUncaughtExceptionHandler(new OomExceptionHandler(cctx.kernalContext()));
thread.start();
} else {
// Disable: not-yet-flushed operations are not logged, so wait for them
// synchronously in the exchange thread. Otherwise, we cannot define a point in
// time when it is safe to continue cache operations.
res = awaitCheckpoint(cpFut, msg);
// WAL state is persisted after the checkpoint is finished. Otherwise, in case of crash
// and restart, we would think that WAL is enabled while the data might be corrupted.
grpCtx.globalWalEnabled(false);
}
} catch (Exception e) {
U.warn(log, "Failed to change WAL mode due to unexpected exception [" + "msg=" + msg + ']', e);
res = new WalStateResult(msg, "Failed to change WAL mode due to unexpected " + "exception (see server logs for more information): " + e.getMessage());
}
} else {
res = new WalStateResult(msg, "Failed to initiate a checkpoint (checkpoint thread " + "is not available).");
}
}
}
} else {
// We cannot know the result on a non-affinity server node, so just complete the operation with a "false" flag,
// which will be ignored anyway.
res = new WalStateResult(msg, false);
}
if (res != null) {
addResult(res);
onCompletedLocally(res);
}
}
}
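The awaitCheckpoint(cpFut, msg) helper used in the disable branch is not shown on this page. A hedged sketch of what it is assumed to do, reconstructed only from the CheckpointProgress and WalStateResult usage above (the FINISHED checkpoint state and the error wording are assumptions):
private WalStateResult awaitCheckpoint(CheckpointProgress cpFut, WalStateProposeMessage msg) {
    try {
        // Block until the checkpoint fully completes, so all dirty pages are flushed to disk.
        cpFut.futureFor(FINISHED).get();
        return new WalStateResult(msg, true);
    } catch (IgniteCheckedException e) {
        return new WalStateResult(msg, "Failed to wait for checkpoint: " + e.getMessage());
    }
}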
Use of org.apache.ignite.thread.OomExceptionHandler in project ignite by apache.
The class GridContinuousProcessor, method registerHandler().
/**
* @param nodeId Node ID.
* @param routineId Routine ID.
* @param hnd Handler.
* @param bufSize Buffer size.
* @param interval Time interval.
* @param autoUnsubscribe Automatic unsubscribe flag.
* @param loc Local registration flag.
* @return Whether listener was actually registered.
* @throws IgniteCheckedException In case of error.
*/
private boolean registerHandler(final UUID nodeId, final UUID routineId, final GridContinuousHandler hnd, int bufSize, final long interval, boolean autoUnsubscribe, boolean loc) throws IgniteCheckedException {
assert nodeId != null;
assert routineId != null;
assert hnd != null;
assert bufSize > 0;
assert interval >= 0;
final RemoteRoutineInfo info = new RemoteRoutineInfo(nodeId, hnd, bufSize, interval, autoUnsubscribe);
boolean doRegister = loc;
if (!doRegister) {
stopLock.lock();
try {
doRegister = !stopped.remove(routineId) && rmtInfos.putIfAbsent(routineId, info) == null;
} finally {
stopLock.unlock();
}
}
if (doRegister) {
if (log.isDebugEnabled())
log.debug("Register handler: [nodeId=" + nodeId + ", routineId=" + routineId + ", info=" + info + ']');
if (interval > 0) {
IgniteThread checker = new IgniteThread(new GridWorker(ctx.igniteInstanceName(), "continuous-buffer-checker", log) {
@Override
protected void body() {
long interval0 = interval;
while (!isCancelled()) {
try {
U.sleep(interval0);
} catch (IgniteInterruptedCheckedException ignored) {
break;
}
IgniteBiTuple<GridContinuousBatch, Long> t = info.checkInterval();
final GridContinuousBatch batch = t.get1();
if (batch != null && batch.size() > 0) {
try {
Collection<Object> toSnd = batch.collect();
boolean msg = toSnd.iterator().next() instanceof Message;
CI1<IgniteException> ackC = new CI1<IgniteException>() {
@Override
public void apply(IgniteException e) {
if (e == null)
info.hnd.onBatchAcknowledged(routineId, batch, ctx);
}
};
sendNotification(nodeId, routineId, null, toSnd, hnd.orderedTopic(), msg, ackC);
} catch (ClusterTopologyCheckedException ignored) {
if (log.isDebugEnabled())
log.debug("Failed to send notification to node (is node alive?): " + nodeId);
} catch (IgniteCheckedException e) {
U.error(log, "Failed to send notification to node: " + nodeId, e);
}
}
interval0 = t.get2();
}
}
});
checker.setUncaughtExceptionHandler(new OomExceptionHandler(ctx));
bufCheckThreads.put(routineId, checker);
checker.start();
}
GridContinuousHandler.RegisterStatus status = hnd.register(nodeId, routineId, ctx);
if (status == GridContinuousHandler.RegisterStatus.DELAYED) {
info.markDelayedRegister();
return false;
} else
return status == GridContinuousHandler.RegisterStatus.REGISTERED;
}
return false;
}
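The buffer-checker thread created above is tracked in bufCheckThreads, so it can be stopped when the routine is unregistered. A plausible cleanup counterpart (an illustration, not the verbatim GridContinuousProcessor code) might be:
// Stop the buffer checker of a routine that is being unregistered.
IgniteThread checker = bufCheckThreads.remove(routineId);

if (checker != null)
    // U.sleep(...) in the worker body turns the interrupt into an
    // IgniteInterruptedCheckedException, which breaks the check loop.
    checker.interrupt();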