Use of org.apache.ignite.internal.events.DiscoveryCustomEvent in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method init().
/**
* Starts activity.
*
* @throws IgniteInterruptedCheckedException If interrupted.
*/
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;
    initTs = U.currentTimeMillis();
    U.await(evtLatch);
    assert discoEvt != null : this;
    assert exchId.nodeId().equals(discoEvt.eventNode().id()) : this;
    assert !dummy && !forcePreload : this;
    try {
        discoCache.updateAlives(cctx.discovery());
        AffinityTopologyVersion topVer = topologyVersion();
        srvNodes = new ArrayList<>(discoCache.serverNodes());
        remaining.addAll(F.nodeIds(F.view(srvNodes, F.remoteNodes(cctx.localNodeId()))));
        crd = srvNodes.isEmpty() ? null : srvNodes.get(0);
        boolean crdNode = crd != null && crd.isLocal();
        skipPreload = cctx.kernalContext().clientNode();
        ExchangeType exchange;
        if (discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            DiscoveryCustomMessage msg = ((DiscoveryCustomEvent) discoEvt).customMessage();
            if (msg instanceof DynamicCacheChangeBatch) {
                assert exchActions != null && !exchActions.empty();
                exchange = onCacheChangeRequest(crdNode);
            } else if (msg instanceof StartFullSnapshotAckDiscoveryMessage)
                exchange = CU.clientNode(discoEvt.eventNode()) ? onClientNodeEvent(crdNode) : onServerNodeEvent(crdNode);
            else {
                assert affChangeMsg != null : this;
                exchange = onAffinityChangeRequest(crdNode);
            }
        } else {
            if (discoEvt.type() == EVT_NODE_JOINED) {
                if (!discoEvt.eventNode().isLocal()) {
                    Collection<DynamicCacheDescriptor> receivedCaches = cctx.cache().startReceivedCaches(discoEvt.eventNode().id(), topVer);
                    cctx.affinity().initStartedCaches(crdNode, this, receivedCaches);
                } else
                    cctx.cache().startCachesOnLocalJoin(topVer);
            }
            exchange = CU.clientNode(discoEvt.eventNode()) ? onClientNodeEvent(crdNode) : onServerNodeEvent(crdNode);
        }
        updateTopologies(crdNode);
        if (exchActions != null && exchActions.hasStop())
            cctx.cache().context().database().beforeCachesStop();
        switch (exchange) {
            case ALL: {
                distributedExchange();
                break;
            }
            case CLIENT: {
                initTopologies();
                clientOnlyExchange();
                break;
            }
            case NONE: {
                initTopologies();
                onDone(topVer);
                break;
            }
            default:
                assert false;
        }
    } catch (IgniteInterruptedCheckedException e) {
        onDone(e);
        throw e;
    } catch (IgniteNeedReconnectException e) {
        onDone(e);
    } catch (Throwable e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);
            onDone(e);
        }
        if (e instanceof Error)
            throw (Error) e;
    }
}
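The DiscoveryCustomEvent usage here boils down to the branch on discoEvt.type(): a custom event carries a DiscoveryCustomMessage payload that selects the exchange type, while a plain join/leave/failure event is classified by the node that produced it. Below is a minimal sketch of that dispatch, assuming Ignite's internal classes are on the classpath; the two handle* methods are hypothetical placeholders for the branches taken above.

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.events.DiscoveryCustomEvent;
import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;

import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT;

class ExchangeTriggerSketch {
    /** Dispatches on the event that triggered an exchange; handle* methods are hypothetical. */
    static void classify(DiscoveryEvent discoEvt) {
        if (discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            // Custom event: the payload (cache change batch, snapshot request, affinity change
            // message, ...) decides how the exchange proceeds.
            DiscoveryCustomMessage msg = ((DiscoveryCustomEvent)discoEvt).customMessage();

            handleCustomMessage(msg);
        }
        else {
            // Plain topology event: node joined, left or failed.
            handleNodeEvent(discoEvt);
        }
    }

    static void handleCustomMessage(DiscoveryCustomMessage msg) {
        // Hypothetical: e.g. instanceof checks as in init() above.
    }

    static void handleNodeEvent(DiscoveryEvent evt) {
        // Hypothetical: e.g. client vs. server node handling as in init() above.
    }
}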
Use of org.apache.ignite.internal.events.DiscoveryCustomEvent in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method processFullMessage().
/**
* @param checkCrd If {@code true} checks that local node is exchange coordinator.
* @param node Sender node.
* @param msg Message.
*/
private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtPartitionsFullMessage msg) {
    try {
        assert exchId.equals(msg.exchangeId()) : msg;
        assert msg.lastVersion() != null : msg;
        timeBag.finishGlobalStage("Waiting for Full message");
        if (checkCrd) {
            assert node != null;
            synchronized (mux) {
                if (crd == null) {
                    if (log.isInfoEnabled())
                        log.info("Ignore full message, all server nodes left: " + msg);
                    return;
                }
                switch (state) {
                    case CRD:
                    case BECOME_CRD: {
                        if (log.isInfoEnabled())
                            log.info("Ignore full message, node is coordinator: " + msg);
                        return;
                    }
                    case DONE: {
                        if (log.isInfoEnabled())
                            log.info("Ignore full message, future is done: " + msg);
                        return;
                    }
                    case SRV:
                    case CLIENT: {
                        if (!crd.equals(node)) {
                            if (log.isInfoEnabled()) {
                                log.info("Received full message from non-coordinator [node=" + node.id() + ", nodeOrder=" + node.order() + ", crd=" + crd.id() + ", crdOrder=" + crd.order() + ']');
                            }
                            if (node.order() > crd.order())
                                fullMsgs.put(node, msg);
                            return;
                        } else {
                            if (!F.isEmpty(msg.getErrorsMap())) {
                                Exception e = msg.getErrorsMap().get(cctx.localNodeId());
                                if (e instanceof IgniteNeedReconnectException) {
                                    onDone(e);
                                    return;
                                }
                            }
                            AffinityTopologyVersion resVer = msg.resultTopologyVersion() != null ? msg.resultTopologyVersion() : initialVersion();
                            if (log.isInfoEnabled()) {
                                log.info("Received full message, will finish exchange [node=" + node.id() + ", resVer=" + resVer + ']');
                            }
                            finishState = new FinishState(crd.id(), resVer, msg);
                            state = ExchangeLocalState.DONE;
                            break;
                        }
                    }
                }
            }
        } else
            assert node == null : node;
        AffinityTopologyVersion resTopVer = initialVersion();
        if (exchCtx.mergeExchanges()) {
            if (msg.resultTopologyVersion() != null && !initialVersion().equals(msg.resultTopologyVersion())) {
                if (log.isInfoEnabled()) {
                    log.info("Received full message, need merge [curFut=" + initialVersion() + ", resVer=" + msg.resultTopologyVersion() + ']');
                }
                resTopVer = msg.resultTopologyVersion();
                if (cctx.exchange().mergeExchanges(this, msg)) {
                    assert cctx.kernalContext().isStopping() || cctx.kernalContext().clientDisconnected();
                    // Node is stopping, no need to further process exchange.
                    return;
                }
                assert resTopVer.equals(exchCtx.events().topologyVersion()) : "Unexpected result version [" + "msgVer=" + resTopVer + ", locVer=" + exchCtx.events().topologyVersion() + ']';
            }
            exchCtx.events().processEvents(this);
            if (localJoinExchange()) {
                Set<Integer> noAffinityGroups = cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
                // Prevent cache usage by a user.
                if (!noAffinityGroups.isEmpty()) {
                    List<GridCacheAdapter> closedCaches = cctx.cache().blockGateways(noAffinityGroups);
                    closedCaches.forEach(cache -> log.warning("Affinity for cache " + cache.context().name() + " has not received from coordinator during local join. " + " Probably cache is already stopped but not processed on local node yet." + " Cache proxy will be closed for user interactions for safety."));
                }
            } else {
                if (exchCtx.events().hasServerLeft())
                    cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
                else
                    cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false);
                for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                    if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
                        continue;
                    grp.topology().beforeExchange(this, true, false);
                }
            }
        } else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin())
            cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
        else if (forceAffReassignment)
            cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
        timeBag.finishGlobalStage("Affinity recalculation");
        if (dynamicCacheStartExchange() && !F.isEmpty(exchangeGlobalExceptions)) {
            assert cctx.localNode().isClient();
            // TODO: https://issues.apache.org/jira/browse/IGNITE-8796
            // The current exchange has been successfully completed on all server nodes,
            // but has failed on that client node for some reason.
            // It looks like that we need to rollback dynamically started caches on the client node,
            // complete DynamicCacheStartFutures (if they are registered) with the cause of that failure
            // and complete current exchange without errors.
            onDone(exchangeLocE);
            return;
        }
        updatePartitionFullMap(resTopVer, msg);
        if (msg.rebalanced())
            markRebalanced();
        if (stateChangeExchange() && !F.isEmpty(msg.getErrorsMap()))
            cctx.kernalContext().state().onStateChangeError(msg.getErrorsMap(), exchActions.stateChangeRequest());
        if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();
            if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) discoveryCustomMessage).needAssignPartitions())
                markAffinityReassign();
        }
        onDone(resTopVer, null);
    } catch (IgniteCheckedException e) {
        onDone(e);
    }
}
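The same guarded cast from DiscoveryEvent to DiscoveryCustomEvent appears in several methods of this future (init(), processFullMessage(), finishExchangeOnCoordinator()). The following small helper is illustrative only and not part of the Ignite API; it assumes the same internal Ignite classes as above.

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.events.DiscoveryCustomEvent;
import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
import org.jetbrains.annotations.Nullable;

import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT;

final class CustomMessages {
    private CustomMessages() {
        // No instances.
    }

    /**
     * @param evt Event that triggered an exchange.
     * @return Custom message payload, or {@code null} if the exchange was started by a plain
     *      node join/leave/failure event.
     */
    @Nullable static DiscoveryCustomMessage customMessageOf(DiscoveryEvent evt) {
        return evt.type() == EVT_DISCOVERY_CUSTOM_EVT
            ? ((DiscoveryCustomEvent)evt).customMessage()
            : null;
    }
}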
Use of org.apache.ignite.internal.events.DiscoveryCustomEvent in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method onDone().
/**
* {@inheritDoc}
*/
@Override
public boolean onDone(@Nullable AffinityTopologyVersion res, @Nullable Throwable err) {
    assert res != null || err != null : "TopVer=" + res + ", err=" + err;
    if (isDone() || !done.compareAndSet(false, true))
        return false;
    if (log.isInfoEnabled()) {
        log.info("Finish exchange future [startVer=" + initialVersion() + ", resVer=" + res + ", err=" + err + ", rebalanced=" + rebalanced() + ", wasRebalanced=" + wasRebalanced() + ']');
    }
    if (res != null) {
        span.addTag(SpanTags.tag(SpanTags.RESULT, SpanTags.TOPOLOGY_VERSION, SpanTags.MAJOR), () -> String.valueOf(res.topologyVersion()));
        span.addTag(SpanTags.tag(SpanTags.RESULT, SpanTags.TOPOLOGY_VERSION, SpanTags.MINOR), () -> String.valueOf(res.minorTopologyVersion()));
    }
    if (err != null) {
        Throwable errf = err;
        span.addTag(SpanTags.ERROR, errf::toString);
    }
    boolean cleanIdxRebuildFutures = true;
    try {
        waitUntilNewCachesAreRegistered();
        if (err == null && !cctx.kernalContext().clientNode() && (serverNodeDiscoveryEvent() || affChangeMsg != null)) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (!cacheCtx.affinityNode() || cacheCtx.isLocal())
                    continue;
                cacheCtx.continuousQueries().flushOnExchangeDone(res);
            }
        }
        if (err == null) {
            if (centralizedAff || forceAffReassignment) {
                assert !exchCtx.mergeExchanges();
                Collection<CacheGroupContext> grpToRefresh = U.newHashSet(cctx.cache().cacheGroups().size());
                for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                    if (grp.isLocal())
                        continue;
                    try {
                        if (grp.topology().initPartitionsWhenAffinityReady(res, this))
                            grpToRefresh.add(grp);
                    } catch (IgniteInterruptedCheckedException e) {
                        U.error(log, "Failed to initialize partitions.", e);
                    }
                }
                if (!grpToRefresh.isEmpty()) {
                    if (log.isDebugEnabled())
                        log.debug("Refresh partitions due to partitions initialized when affinity ready [" + grpToRefresh.stream().map(CacheGroupContext::name).collect(Collectors.toList()) + ']');
                    cctx.exchange().refreshPartitions(grpToRefresh);
                }
            }
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;
                if (drCacheCtx.isDrEnabled()) {
                    try {
                        drCacheCtx.dr().onExchange(res, exchId.isLeft(), activateCluster());
                    } catch (IgniteCheckedException e) {
                        U.error(log, "Failed to notify DR: " + e, e);
                    }
                }
            }
            if (exchCtx.events().hasServerLeft() || activateCluster())
                detectLostPartitions(res);
            Map<Integer, CacheGroupValidation> m = U.newHashMap(cctx.cache().cacheGroups().size());
            for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                CacheGroupValidation valRes = validateCacheGroup(grp, events().lastEvent().topologyNodes());
                if (!valRes.isValid() || valRes.hasLostPartitions())
                    m.put(grp.groupId(), valRes);
            }
            grpValidRes = m;
        }
        if (!cctx.localNode().isClient())
            tryToPerformLocalSnapshotOperation();
        if (err == null)
            cctx.coordinators().onExchangeDone(events().discoveryCache());
        for (PartitionsExchangeAware comp : cctx.exchange().exchangeAwareComponents())
            comp.onDoneBeforeTopologyUnlock(this);
        // Create and destroy caches and cache proxies.
        cctx.cache().onExchangeDone(initialVersion(), exchActions, err);
        Map<T2<Integer, Integer>, Long> localReserved = partHistSuppliers.getReservations(cctx.localNodeId());
        if (localReserved != null) {
            boolean success = cctx.database().reserveHistoryForPreloading(localReserved);
            if (!success) {
                log.warning("Could not reserve history for historical rebalance " + "(possible it happened because WAL space is exhausted).");
            }
        }
        cctx.database().releaseHistoryForExchange();
        if (err == null) {
            cctx.database().rebuildIndexesIfNeeded(this);
            cleanIdxRebuildFutures = false;
            for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                if (!grp.isLocal())
                    grp.topology().onExchangeDone(this, grp.affinity().readyAffinity(res), false);
            }
            if (changedAffinity())
                cctx.walState().disableGroupDurabilityForPreloading(this);
        }
    } catch (Throwable t) {
        // In any case, this exchange future has to be completed. The original error should be preserved if exists.
        if (err != null)
            t.addSuppressed(err);
        err = t;
    }
    final Throwable err0 = err;
    if (err0 != null && cleanIdxRebuildFutures)
        cctx.kernalContext().query().removeIndexRebuildFuturesOnExchange(this, null);
    // Should execute this listener first, before any external listeners.
    // Listeners use stack as data structure.
    listen(f -> {
        // Update last finished future in the first.
        cctx.exchange().lastFinishedFuture(this);
        // Complete any affReady futures and update last exchange done version.
        cctx.exchange().onExchangeDone(res, initialVersion(), err0);
        cctx.cache().completeProxyRestart(resolveCacheRequests(exchActions), initialVersion(), res);
        if (exchActions != null && err0 == null)
            exchActions.completeRequestFutures(cctx, null);
        if (stateChangeExchange() && err0 == null)
            cctx.kernalContext().state().onStateChangeExchangeDone(exchActions.stateChangeRequest());
    });
    if (super.onDone(res, err)) {
        afterLsnrCompleteFut.onDone();
        span.addLog(() -> "Completed partition exchange");
        span.end();
        if (err == null) {
            updateDurationHistogram(System.currentTimeMillis() - initTime);
            cctx.exchange().clusterRebalancedMetric().value(rebalanced());
        }
        if (log.isInfoEnabled()) {
            log.info("Completed partition exchange [localNode=" + cctx.localNodeId() + ", exchange=" + (log.isDebugEnabled() ? this : shortInfo()) + ", topVer=" + topologyVersion() + "]");
            if (err == null) {
                timeBag.finishGlobalStage("Exchange done");
                // Collect all stages timings.
                List<String> timings = timeBag.stagesTimings();
                if (discoveryLag != null && discoveryLag.get1() != 0)
                    timings.add("Discovery lag=" + discoveryLag.get1() + " ms, Latest started node id=" + discoveryLag.get2());
                log.info(exchangeTimingsLogMessage("Exchange timings", timings));
                List<String> localTimings = timeBag.longestLocalStagesTimings(3);
                log.info(exchangeTimingsLogMessage("Exchange longest local stages", localTimings));
            }
        }
        initFut.onDone(err == null);
        cctx.exchange().latch().dropClientLatches(initialVersion());
        if (exchCtx != null && exchCtx.events().hasServerLeft()) {
            ExchangeDiscoveryEvents evts = exchCtx.events();
            for (DiscoveryEvent evt : evts.events()) {
                if (serverLeftEvent(evt)) {
                    for (CacheGroupContext grp : cctx.cache().cacheGroups())
                        grp.affinityFunction().removeNode(evt.eventNode().id());
                }
            }
        }
        for (PartitionsExchangeAware comp : cctx.exchange().exchangeAwareComponents())
            comp.onDoneAfterTopologyUnlock(this);
        if (firstDiscoEvt instanceof DiscoveryCustomEvent)
            ((DiscoveryCustomEvent) firstDiscoEvt).customMessage(null);
        if (err == null) {
            if (exchCtx != null && (exchCtx.events().hasServerLeft() || exchCtx.events().hasServerJoin())) {
                ExchangeDiscoveryEvents evts = exchCtx.events();
                for (DiscoveryEvent evt : evts.events()) {
                    if (serverLeftEvent(evt) || serverJoinEvent(evt))
                        logExchange(evt);
                }
            }
        }
        return true;
    }
    return false;
}
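Note the cleanup near the end of the successful branch: once the future completes, the payload is detached from the stored custom event via customMessage(null) so that potentially large messages can be garbage collected. Any component that keeps a reference to the DiscoveryCustomEvent for later, asynchronous processing must copy or capture the message before this point, which is exactly what ServiceDeploymentManager.copyIfNeeded below exists for. A hedged sketch of the release step in isolation:

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.events.DiscoveryCustomEvent;

class CustomEventCleanupSketch {
    /** Illustrative only: after this call, customMessage() on the stored event returns null. */
    static void releaseCustomMessage(DiscoveryEvent firstDiscoEvt) {
        if (firstDiscoEvt instanceof DiscoveryCustomEvent)
            ((DiscoveryCustomEvent)firstDiscoEvt).customMessage(null);
    }
}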
Use of org.apache.ignite.internal.events.DiscoveryCustomEvent in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method finishExchangeOnCoordinator().
/**
* @param sndResNodes Additional nodes to send finish message to.
*/
private void finishExchangeOnCoordinator(@Nullable Collection<ClusterNode> sndResNodes) {
    if (isDone() || !enterBusy())
        return;
    try {
        if (!F.isEmpty(exchangeGlobalExceptions) && dynamicCacheStartExchange() && isRollbackSupported()) {
            sendExchangeFailureMessage();
            return;
        }
        AffinityTopologyVersion resTopVer = exchCtx.events().topologyVersion();
        if (log.isInfoEnabled()) {
            log.info("finishExchangeOnCoordinator [topVer=" + initialVersion() + ", resVer=" + resTopVer + ']');
        }
        Map<Integer, CacheGroupAffinityMessage> idealAffDiff = null;
        // Reserve at least 2 threads for system operations.
        int parallelismLvl = U.availableThreadCount(cctx.kernalContext(), GridIoPolicy.SYSTEM_POOL, 2);
        if (exchCtx.mergeExchanges()) {
            synchronized (mux) {
                if (mergedJoinExchMsgs != null) {
                    for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : mergedJoinExchMsgs.entrySet()) {
                        msgs.put(e.getKey(), e.getValue());
                        updatePartitionSingleMap(e.getKey(), e.getValue());
                    }
                }
            }
            assert exchCtx.events().hasServerJoin() || exchCtx.events().hasServerLeft();
            exchCtx.events().processEvents(this);
            if (exchCtx.events().hasServerLeft())
                idealAffDiff = cctx.affinity().onServerLeftWithExchangeMergeProtocol(this);
            else
                cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, true);
            doInParallel(parallelismLvl, cctx.kernalContext().pools().getSystemExecutorService(), cctx.affinity().cacheGroups().values(), desc -> {
                if (desc.config().getCacheMode() == CacheMode.LOCAL)
                    return null;
                CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
                GridDhtPartitionTopology top = grp != null ? grp.topology() : cctx.exchange().clientTopology(desc.groupId(), events().discoveryCache());
                top.beforeExchange(this, true, true);
                return null;
            });
        }
        span.addLog(() -> "Affinity recalculation (crd)");
        timeBag.finishGlobalStage("Affinity recalculation (crd)");
        Map<Integer, CacheGroupAffinityMessage> joinedNodeAff = new ConcurrentHashMap<>(cctx.cache().cacheGroups().size());
        doInParallel(parallelismLvl, cctx.kernalContext().pools().getSystemExecutorService(), msgs.values(), msg -> {
            processSingleMessageOnCrdFinish(msg, joinedNodeAff);
            return null;
        });
        timeBag.finishGlobalStage("Collect update counters and create affinity messages");
        if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            assert firstDiscoEvt instanceof DiscoveryCustomEvent;
            if (activateCluster() || changedBaseline())
                assignPartitionsStates(null);
            DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();
            if (discoveryCustomMessage instanceof DynamicCacheChangeBatch) {
                if (exchActions != null) {
                    Set<String> caches = exchActions.cachesToResetLostPartitions();
                    if (!F.isEmpty(caches))
                        resetLostPartitions(caches);
                    Set<Integer> cacheGroupsToResetOwners = concat(exchActions.cacheGroupsToStart().stream().map(grp -> grp.descriptor().groupId()), exchActions.cachesToResetLostPartitions().stream().map(CU::cacheId)).collect(Collectors.toSet());
                    assignPartitionsStates(cacheGroupsToResetOwners);
                }
            } else if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) discoveryCustomMessage).needAssignPartitions()) {
                markAffinityReassign();
                assignPartitionsStates(null);
            }
        } else if (exchCtx.events().hasServerJoin())
            assignPartitionsStates(null);
        else if (exchCtx.events().hasServerLeft())
            assignPartitionsStates(emptySet());
        // Validation should happen after resetting owners to avoid false desync reporting.
        validatePartitionsState();
        // Recalculate new affinity based on partitions availability.
        if (!exchCtx.mergeExchanges() && forceAffReassignment) {
            idealAffDiff = cctx.affinity().onCustomEventWithEnforcedAffinityReassignment(this);
            timeBag.finishGlobalStage("Ideal affinity diff calculation (enforced)");
        }
        for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) {
            if (!grpCtx.isLocal())
                grpCtx.topology().applyUpdateCounters();
        }
        timeBag.finishGlobalStage("Apply update counters");
        updateLastVersion(cctx.versions().last());
        cctx.versions().onExchange(lastVer.get().order());
        IgniteProductVersion minVer = exchCtx.events().discoveryCache().minimumNodeVersion();
        GridDhtPartitionsFullMessage msg = createPartitionsMessage(true, minVer.compareToIgnoreTimestamp(PARTIAL_COUNTERS_MAP_SINCE) >= 0);
        if (!cctx.affinity().rebalanceRequired() && !deactivateCluster())
            msg.rebalanced(true);
        if (exchCtx.mergeExchanges()) {
            assert !centralizedAff;
            msg.resultTopologyVersion(resTopVer);
            if (exchCtx.events().hasServerLeft())
                msg.idealAffinityDiff(idealAffDiff);
        } else if (forceAffReassignment)
            msg.idealAffinityDiff(idealAffDiff);
        msg.prepareMarshal(cctx);
        timeBag.finishGlobalStage("Full message preparing");
        synchronized (mux) {
            finishState = new FinishState(crd.id(), resTopVer, msg);
            state = ExchangeLocalState.DONE;
        }
        if (centralizedAff) {
            assert !exchCtx.mergeExchanges();
            IgniteInternalFuture<Map<Integer, Map<Integer, List<UUID>>>> fut = cctx.affinity().initAffinityOnNodeLeft(this);
            if (!fut.isDone())
                fut.listen(this::onAffinityInitialized);
            else
                onAffinityInitialized(fut);
        } else {
            Set<ClusterNode> nodes;
            Map<UUID, GridDhtPartitionsSingleMessage> mergedJoinExchMsgs0;
            synchronized (mux) {
                srvNodes.remove(cctx.localNode());
                nodes = new LinkedHashSet<>(srvNodes);
                mergedJoinExchMsgs0 = mergedJoinExchMsgs;
                if (mergedJoinExchMsgs != null) {
                    for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : mergedJoinExchMsgs.entrySet()) {
                        if (e.getValue() != null) {
                            ClusterNode node = cctx.discovery().node(e.getKey());
                            if (node != null)
                                nodes.add(node);
                        }
                    }
                } else
                    mergedJoinExchMsgs0 = Collections.emptyMap();
                if (!F.isEmpty(sndResNodes))
                    nodes.addAll(sndResNodes);
            }
            if (msg.rebalanced())
                markRebalanced();
            if (!nodes.isEmpty())
                sendAllPartitions(msg, nodes, mergedJoinExchMsgs0, joinedNodeAff);
            timeBag.finishGlobalStage("Full message sending");
            discoveryLag = calculateDiscoveryLag(msgs, mergedJoinExchMsgs0);
            if (!stateChangeExchange())
                onDone(exchCtx.events().topologyVersion(), null);
            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : pendingSingleMsgs.entrySet()) {
                if (log.isInfoEnabled()) {
                    log.info("Process pending message on coordinator [node=" + e.getKey() + ", ver=" + initialVersion() + ", resVer=" + resTopVer + ']');
                }
                processSingleMessage(e.getKey(), e.getValue());
            }
        }
        if (stateChangeExchange()) {
            StateChangeRequest req = exchActions.stateChangeRequest();
            assert req != null : exchActions;
            boolean stateChangeErr = false;
            if (!F.isEmpty(exchangeGlobalExceptions)) {
                stateChangeErr = true;
                cctx.kernalContext().state().onStateChangeError(exchangeGlobalExceptions, req);
            } else {
                boolean hasMoving = !partsToReload.isEmpty();
                Set<Integer> waitGrps = cctx.affinity().waitGroups();
                if (!hasMoving) {
                    for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) {
                        if (waitGrps.contains(grpCtx.groupId()) && grpCtx.topology().hasMovingPartitions()) {
                            hasMoving = true;
                            break;
                        }
                    }
                }
                cctx.kernalContext().state().onExchangeFinishedOnCoordinator(this, hasMoving);
            }
            if (!cctx.kernalContext().state().clusterState().localBaselineAutoAdjustment()) {
                ClusterState state = stateChangeErr ? ClusterState.INACTIVE : req.state();
                ChangeGlobalStateFinishMessage stateFinishMsg = new ChangeGlobalStateFinishMessage(req.requestId(), state, !stateChangeErr);
                cctx.discovery().sendCustomEvent(stateFinishMsg);
            }
            timeBag.finishGlobalStage("State finish message sending");
            if (!centralizedAff)
                onDone(exchCtx.events().topologyVersion(), null);
        }
        // Try switch late affinity right now if an exchange has been completed normally.
        if (!centralizedAff && isDone() && error() == null && !cctx.kernalContext().isStopping())
            cctx.exchange().checkRebalanceState();
    } catch (IgniteCheckedException e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else
            onDone(e);
    } finally {
        leaveBusy();
    }
}
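On the coordinator the custom message also decides whether partition states must be reassigned: a DynamicCacheChangeBatch leads to assignPartitionsStates for started or reset cache groups, while a SnapshotDiscoveryMessage may demand a full reassignment. Below is an illustrative sketch of the snapshot predicate only; it would sit next to the code above and reuse its imports (the package of SnapshotDiscoveryMessage is not shown in this listing, so its import is deliberately left out).

/**
 * Illustrative predicate (not Ignite API): {@code true} if the event that started the exchange
 * is a snapshot message that requests a fresh partition assignment.
 */
static boolean snapshotNeedsAssignPartitions(DiscoveryEvent firstDiscoEvt) {
    if (firstDiscoEvt.type() != DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT)
        return false;

    DiscoveryCustomMessage msg = ((DiscoveryCustomEvent)firstDiscoEvt).customMessage();

    return msg instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage)msg).needAssignPartitions();
}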
Use of org.apache.ignite.internal.events.DiscoveryCustomEvent in project ignite by apache.
The class ServiceDeploymentManager, method copyIfNeeded().
/**
 * Clones some instances of {@link DiscoveryCustomEvent} to capture the necessary data and avoid the custom
 * message being nullified by {@link GridDhtPartitionsExchangeFuture#onDone}.
 *
 * @param evt Discovery event.
 * @return Discovery event to process.
 */
private DiscoveryCustomEvent copyIfNeeded(@NotNull DiscoveryCustomEvent evt) {
    DiscoveryCustomMessage msg = evt.customMessage();
    assert msg != null : "DiscoveryCustomMessage has been nullified concurrently, evt=" + evt;
    if (msg instanceof ServiceChangeBatchRequest)
        return evt;
    DiscoveryCustomEvent cp = new DiscoveryCustomEvent();
    cp.node(evt.node());
    cp.customMessage(msg);
    cp.eventNode(evt.eventNode());
    cp.affinityTopologyVersion(evt.affinityTopologyVersion());
    return cp;
}
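A hedged usage sketch for copyIfNeeded: the copy is taken before the event is handed off for asynchronous service deployment processing, so the captured DiscoveryCustomMessage survives the customMessage(null) cleanup performed by GridDhtPartitionsExchangeFuture#onDone. The queue field and the calling method below are hypothetical; only the copy-then-enqueue ordering matters.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.ignite.internal.events.DiscoveryCustomEvent;

class DeploymentQueueSketch {
    /** Events waiting to be processed by a worker thread (hypothetical). */
    private final Queue<DiscoveryCustomEvent> pendingEvts = new ConcurrentLinkedQueue<>();

    /** Hypothetical caller: snapshot the event before the exchange future can nullify its message. */
    void onCustomEvent(DiscoveryCustomEvent evt) {
        DiscoveryCustomEvent cp = copyIfNeeded(evt); // Keeps the custom message reachable.

        pendingEvts.add(cp); // Processed later, possibly after the exchange has completed.
    }

    private DiscoveryCustomEvent copyIfNeeded(DiscoveryCustomEvent evt) {
        // See ServiceDeploymentManager#copyIfNeeded above; repeated here only so the sketch is self-contained.
        DiscoveryCustomEvent cp = new DiscoveryCustomEvent();

        cp.node(evt.node());
        cp.customMessage(evt.customMessage());
        cp.eventNode(evt.eventNode());
        cp.affinityTopologyVersion(evt.affinityTopologyVersion());

        return cp;
    }
}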