Use of org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method processFullMessage.
/**
* @param checkCrd If {@code true} checks that local node is exchange coordinator.
* @param node Sender node.
* @param msg Message.
*/
private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtPartitionsFullMessage msg) {
try {
assert exchId.equals(msg.exchangeId()) : msg;
assert msg.lastVersion() != null : msg;
timeBag.finishGlobalStage("Waiting for Full message");
if (checkCrd) {
assert node != null;
synchronized (mux) {
if (crd == null) {
if (log.isInfoEnabled())
log.info("Ignore full message, all server nodes left: " + msg);
return;
}
switch(state) {
case CRD:
case BECOME_CRD:
{
if (log.isInfoEnabled())
log.info("Ignore full message, node is coordinator: " + msg);
return;
}
case DONE:
{
if (log.isInfoEnabled())
log.info("Ignore full message, future is done: " + msg);
return;
}
case SRV:
case CLIENT:
{
if (!crd.equals(node)) {
if (log.isInfoEnabled()) {
log.info("Received full message from non-coordinator [node=" + node.id() + ", nodeOrder=" + node.order() + ", crd=" + crd.id() + ", crdOrder=" + crd.order() + ']');
}
if (node.order() > crd.order())
fullMsgs.put(node, msg);
return;
} else {
if (!F.isEmpty(msg.getErrorsMap())) {
Exception e = msg.getErrorsMap().get(cctx.localNodeId());
if (e instanceof IgniteNeedReconnectException) {
onDone(e);
return;
}
}
AffinityTopologyVersion resVer = msg.resultTopologyVersion() != null ? msg.resultTopologyVersion() : initialVersion();
if (log.isInfoEnabled()) {
log.info("Received full message, will finish exchange [node=" + node.id() + ", resVer=" + resVer + ']');
}
finishState = new FinishState(crd.id(), resVer, msg);
state = ExchangeLocalState.DONE;
break;
}
}
}
}
} else
assert node == null : node;
AffinityTopologyVersion resTopVer = initialVersion();
if (exchCtx.mergeExchanges()) {
if (msg.resultTopologyVersion() != null && !initialVersion().equals(msg.resultTopologyVersion())) {
if (log.isInfoEnabled()) {
log.info("Received full message, need merge [curFut=" + initialVersion() + ", resVer=" + msg.resultTopologyVersion() + ']');
}
resTopVer = msg.resultTopologyVersion();
if (cctx.exchange().mergeExchanges(this, msg)) {
assert cctx.kernalContext().isStopping() || cctx.kernalContext().clientDisconnected();
// Node is stopping, no need to further process exchange.
return;
}
assert resTopVer.equals(exchCtx.events().topologyVersion()) : "Unexpected result version [" + "msgVer=" + resTopVer + ", locVer=" + exchCtx.events().topologyVersion() + ']';
}
exchCtx.events().processEvents(this);
if (localJoinExchange()) {
Set<Integer> noAffinityGroups = cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
// Prevent cache usage by a user.
if (!noAffinityGroups.isEmpty()) {
List<GridCacheAdapter> closedCaches = cctx.cache().blockGateways(noAffinityGroups);
closedCaches.forEach(cache -> log.warning("Affinity for cache " + cache.context().name() + " was not received from the coordinator during local join." + " Probably the cache is already stopped but not yet processed on the local node." + " The cache proxy will be closed for user interactions for safety."));
}
} else {
if (exchCtx.events().hasServerLeft())
cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
else
cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false);
for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
continue;
grp.topology().beforeExchange(this, true, false);
}
}
} else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin())
cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer);
else if (forceAffReassignment)
cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff());
timeBag.finishGlobalStage("Affinity recalculation");
if (dynamicCacheStartExchange() && !F.isEmpty(exchangeGlobalExceptions)) {
assert cctx.localNode().isClient();
// TODO: https://issues.apache.org/jira/browse/IGNITE-8796
// The current exchange has been successfully completed on all server nodes,
// but has failed on that client node for some reason.
// It looks like we need to roll back dynamically started caches on the client node,
// complete DynamicCacheStartFutures (if they are registered) with the cause of that failure
// and complete current exchange without errors.
onDone(exchangeLocE);
return;
}
updatePartitionFullMap(resTopVer, msg);
if (msg.rebalanced())
markRebalanced();
if (stateChangeExchange() && !F.isEmpty(msg.getErrorsMap()))
cctx.kernalContext().state().onStateChangeError(msg.getErrorsMap(), exchActions.stateChangeRequest());
if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();
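// A snapshot operation may request late affinity reassignment after this exchange completes.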
if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) discoveryCustomMessage).needAssignPartitions())
markAffinityReassign();
}
onDone(resTopVer, null);
} catch (IgniteCheckedException e) {
onDone(e);
}
}
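The SnapshotDiscoveryMessage usage here is the check at the end of the method: if the custom discovery event that triggered the exchange carries a SnapshotDiscoveryMessage that needs partition assignment, the future marks affinity for reassignment before completing. Below is a minimal, self-contained sketch of that dispatch; the ExchangeFlags class and requiresAffinityReassignment method are hypothetical names introduced only for illustration, while the instanceof check and needAssignPartitions() call mirror the code above.

import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage;

/** Hypothetical helper mirroring the snapshot check performed in processFullMessage(). */
final class ExchangeFlags {
    private ExchangeFlags() {
        // No instances.
    }

    /**
     * @param customMsg Custom message of the discovery event that triggered the exchange, or {@code null}.
     * @return {@code true} if the message is a snapshot operation that requests partition reassignment.
     */
    static boolean requiresAffinityReassignment(DiscoveryCustomMessage customMsg) {
        return customMsg instanceof SnapshotDiscoveryMessage
            && ((SnapshotDiscoveryMessage)customMsg).needAssignPartitions();
    }
}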
Use of org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method finishExchangeOnCoordinator.
/**
* @param sndResNodes Additional nodes to send finish message to.
*/
private void finishExchangeOnCoordinator(@Nullable Collection<ClusterNode> sndResNodes) {
if (isDone() || !enterBusy())
return;
try {
if (!F.isEmpty(exchangeGlobalExceptions) && dynamicCacheStartExchange() && isRollbackSupported()) {
sendExchangeFailureMessage();
return;
}
AffinityTopologyVersion resTopVer = exchCtx.events().topologyVersion();
if (log.isInfoEnabled()) {
log.info("finishExchangeOnCoordinator [topVer=" + initialVersion() + ", resVer=" + resTopVer + ']');
}
Map<Integer, CacheGroupAffinityMessage> idealAffDiff = null;
// Reserve at least 2 threads for system operations.
int parallelismLvl = U.availableThreadCount(cctx.kernalContext(), GridIoPolicy.SYSTEM_POOL, 2);
if (exchCtx.mergeExchanges()) {
synchronized (mux) {
if (mergedJoinExchMsgs != null) {
for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : mergedJoinExchMsgs.entrySet()) {
msgs.put(e.getKey(), e.getValue());
updatePartitionSingleMap(e.getKey(), e.getValue());
}
}
}
assert exchCtx.events().hasServerJoin() || exchCtx.events().hasServerLeft();
exchCtx.events().processEvents(this);
if (exchCtx.events().hasServerLeft())
idealAffDiff = cctx.affinity().onServerLeftWithExchangeMergeProtocol(this);
else
cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, true);
doInParallel(parallelismLvl, cctx.kernalContext().pools().getSystemExecutorService(), cctx.affinity().cacheGroups().values(), desc -> {
if (desc.config().getCacheMode() == CacheMode.LOCAL)
return null;
CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
GridDhtPartitionTopology top = grp != null ? grp.topology() : cctx.exchange().clientTopology(desc.groupId(), events().discoveryCache());
top.beforeExchange(this, true, true);
return null;
});
}
span.addLog(() -> "Affinity recalculation (crd)");
timeBag.finishGlobalStage("Affinity recalculation (crd)");
Map<Integer, CacheGroupAffinityMessage> joinedNodeAff = new ConcurrentHashMap<>(cctx.cache().cacheGroups().size());
doInParallel(parallelismLvl, cctx.kernalContext().pools().getSystemExecutorService(), msgs.values(), msg -> {
processSingleMessageOnCrdFinish(msg, joinedNodeAff);
return null;
});
timeBag.finishGlobalStage("Collect update counters and create affinity messages");
if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
assert firstDiscoEvt instanceof DiscoveryCustomEvent;
if (activateCluster() || changedBaseline())
assignPartitionsStates(null);
DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();
if (discoveryCustomMessage instanceof DynamicCacheChangeBatch) {
if (exchActions != null) {
Set<String> caches = exchActions.cachesToResetLostPartitions();
if (!F.isEmpty(caches))
resetLostPartitions(caches);
Set<Integer> cacheGroupsToResetOwners = concat(exchActions.cacheGroupsToStart().stream().map(grp -> grp.descriptor().groupId()), exchActions.cachesToResetLostPartitions().stream().map(CU::cacheId)).collect(Collectors.toSet());
assignPartitionsStates(cacheGroupsToResetOwners);
}
} else if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) discoveryCustomMessage).needAssignPartitions()) {
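// The snapshot operation requested partition reassignment: mark affinity for reassignment and recompute partition states for all groups.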
markAffinityReassign();
assignPartitionsStates(null);
}
} else if (exchCtx.events().hasServerJoin())
assignPartitionsStates(null);
else if (exchCtx.events().hasServerLeft())
assignPartitionsStates(emptySet());
// Validation should happen after resetting owners to avoid false desync reporting.
validatePartitionsState();
// Recalculate new affinity based on partitions availability.
if (!exchCtx.mergeExchanges() && forceAffReassignment) {
idealAffDiff = cctx.affinity().onCustomEventWithEnforcedAffinityReassignment(this);
timeBag.finishGlobalStage("Ideal affinity diff calculation (enforced)");
}
for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) {
if (!grpCtx.isLocal())
grpCtx.topology().applyUpdateCounters();
}
timeBag.finishGlobalStage("Apply update counters");
updateLastVersion(cctx.versions().last());
cctx.versions().onExchange(lastVer.get().order());
IgniteProductVersion minVer = exchCtx.events().discoveryCache().minimumNodeVersion();
GridDhtPartitionsFullMessage msg = createPartitionsMessage(true, minVer.compareToIgnoreTimestamp(PARTIAL_COUNTERS_MAP_SINCE) >= 0);
if (!cctx.affinity().rebalanceRequired() && !deactivateCluster())
msg.rebalanced(true);
if (exchCtx.mergeExchanges()) {
assert !centralizedAff;
msg.resultTopologyVersion(resTopVer);
if (exchCtx.events().hasServerLeft())
msg.idealAffinityDiff(idealAffDiff);
} else if (forceAffReassignment)
msg.idealAffinityDiff(idealAffDiff);
msg.prepareMarshal(cctx);
timeBag.finishGlobalStage("Full message preparing");
synchronized (mux) {
finishState = new FinishState(crd.id(), resTopVer, msg);
state = ExchangeLocalState.DONE;
}
if (centralizedAff) {
assert !exchCtx.mergeExchanges();
IgniteInternalFuture<Map<Integer, Map<Integer, List<UUID>>>> fut = cctx.affinity().initAffinityOnNodeLeft(this);
if (!fut.isDone())
fut.listen(this::onAffinityInitialized);
else
onAffinityInitialized(fut);
} else {
Set<ClusterNode> nodes;
Map<UUID, GridDhtPartitionsSingleMessage> mergedJoinExchMsgs0;
synchronized (mux) {
srvNodes.remove(cctx.localNode());
nodes = new LinkedHashSet<>(srvNodes);
mergedJoinExchMsgs0 = mergedJoinExchMsgs;
if (mergedJoinExchMsgs != null) {
for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : mergedJoinExchMsgs.entrySet()) {
if (e.getValue() != null) {
ClusterNode node = cctx.discovery().node(e.getKey());
if (node != null)
nodes.add(node);
}
}
} else
mergedJoinExchMsgs0 = Collections.emptyMap();
if (!F.isEmpty(sndResNodes))
nodes.addAll(sndResNodes);
}
if (msg.rebalanced())
markRebalanced();
if (!nodes.isEmpty())
sendAllPartitions(msg, nodes, mergedJoinExchMsgs0, joinedNodeAff);
timeBag.finishGlobalStage("Full message sending");
discoveryLag = calculateDiscoveryLag(msgs, mergedJoinExchMsgs0);
if (!stateChangeExchange())
onDone(exchCtx.events().topologyVersion(), null);
for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : pendingSingleMsgs.entrySet()) {
if (log.isInfoEnabled()) {
log.info("Process pending message on coordinator [node=" + e.getKey() + ", ver=" + initialVersion() + ", resVer=" + resTopVer + ']');
}
processSingleMessage(e.getKey(), e.getValue());
}
}
if (stateChangeExchange()) {
StateChangeRequest req = exchActions.stateChangeRequest();
assert req != null : exchActions;
boolean stateChangeErr = false;
if (!F.isEmpty(exchangeGlobalExceptions)) {
stateChangeErr = true;
cctx.kernalContext().state().onStateChangeError(exchangeGlobalExceptions, req);
} else {
boolean hasMoving = !partsToReload.isEmpty();
Set<Integer> waitGrps = cctx.affinity().waitGroups();
if (!hasMoving) {
for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) {
if (waitGrps.contains(grpCtx.groupId()) && grpCtx.topology().hasMovingPartitions()) {
hasMoving = true;
break;
}
}
}
cctx.kernalContext().state().onExchangeFinishedOnCoordinator(this, hasMoving);
}
if (!cctx.kernalContext().state().clusterState().localBaselineAutoAdjustment()) {
ClusterState state = stateChangeErr ? ClusterState.INACTIVE : req.state();
ChangeGlobalStateFinishMessage stateFinishMsg = new ChangeGlobalStateFinishMessage(req.requestId(), state, !stateChangeErr);
cctx.discovery().sendCustomEvent(stateFinishMsg);
}
timeBag.finishGlobalStage("State finish message sending");
if (!centralizedAff)
onDone(exchCtx.events().topologyVersion(), null);
}
// Try to switch late affinity right now if the exchange has completed normally.
if (!centralizedAff && isDone() && error() == null && !cctx.kernalContext().isStopping())
cctx.exchange().checkRebalanceState();
} catch (IgniteCheckedException e) {
if (reconnectOnError(e))
onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
else
onDone(e);
} finally {
leaveBusy();
}
}
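On the coordinator the same SnapshotDiscoveryMessage check has a stronger effect: besides marking affinity for reassignment, partition states are recomputed for all cache groups. The enum below is a hypothetical summary of how the custom message type maps to coordinator-side actions; only the instanceof checks and the needAssignPartitions() call reflect the real code above, and the import of DynamicCacheChangeBatch assumes the standard internal package layout.

import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
import org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch;
import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage;

/** Hypothetical classification of custom discovery messages handled in finishExchangeOnCoordinator(). */
enum CrdCustomEventAction {
    /** Reset lost partitions and assign partition states for started or reset cache groups. */
    CACHE_CHANGE,

    /** Mark affinity for reassignment and recompute partition states for all groups. */
    SNAPSHOT_REASSIGN,

    /** No coordinator-specific handling for this custom message. */
    NONE;

    /** @param msg Custom message of the first discovery event, or {@code null}. */
    static CrdCustomEventAction of(DiscoveryCustomMessage msg) {
        if (msg instanceof DynamicCacheChangeBatch)
            return CACHE_CHANGE;

        if (msg instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage)msg).needAssignPartitions())
            return SNAPSHOT_REASSIGN;

        return NONE;
    }
}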
Use of org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method onDiscoveryEvent.
/**
* @param evt Event.
* @param cache Discovery data cache.
*/
private void onDiscoveryEvent(DiscoveryEvent evt, DiscoCache cache) {
ClusterNode loc = cctx.localNode();
assert evt.type() == EVT_NODE_JOINED || evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED || evt.type() == EVT_DISCOVERY_CUSTOM_EVT;
final ClusterNode n = evt.eventNode();
GridDhtPartitionExchangeId exchId = null;
GridDhtPartitionsExchangeFuture exchFut = null;
if (evt.type() != EVT_DISCOVERY_CUSTOM_EVT) {
assert evt.type() != EVT_NODE_JOINED || n.isLocal() || n.order() > loc.order() : "Node joined with smaller-than-local " + "order [newOrder=" + n.order() + ", locOrder=" + loc.order() + ", evt=" + evt + ']';
exchId = exchangeId(n.id(), affinityTopologyVersion(evt), evt);
ExchangeActions exchActs = null;
boolean locJoin = evt.type() == EVT_NODE_JOINED && evt.eventNode().isLocal();
if (locJoin) {
LocalJoinCachesContext locJoinCtx = cctx.cache().localJoinCachesContext();
if (locJoinCtx != null) {
exchActs = new ExchangeActions();
exchActs.localJoinContext(locJoinCtx);
}
}
if (!n.isClient() && !n.isDaemon())
exchActs = cctx.kernalContext().state().autoAdjustExchangeActions(exchActs);
exchFut = exchangeFuture(exchId, evt, cache, exchActs, null);
} else {
DiscoveryCustomMessage customMsg = ((DiscoveryCustomEvent) evt).customMessage();
if (customMsg instanceof ChangeGlobalStateMessage) {
ChangeGlobalStateMessage stateChangeMsg = (ChangeGlobalStateMessage) customMsg;
ExchangeActions exchActions = stateChangeMsg.exchangeActions();
if (exchActions != null) {
exchId = exchangeId(n.id(), affinityTopologyVersion(evt), evt);
exchFut = exchangeFuture(exchId, evt, cache, exchActions, null);
boolean baselineChanging;
if (stateChangeMsg.forceChangeBaselineTopology())
baselineChanging = true;
else {
DiscoveryDataClusterState state = cctx.kernalContext().state().clusterState();
assert state.transition() : state;
baselineChanging = exchActions.changedBaseline() || // Or it is the first activation.
state.state() != ClusterState.INACTIVE && !state.previouslyActive() && state.previousBaselineTopology() == null;
}
exchFut.listen(f -> onClusterStateChangeFinish(f, exchActions, baselineChanging));
}
} else if (customMsg instanceof DynamicCacheChangeBatch) {
DynamicCacheChangeBatch batch = (DynamicCacheChangeBatch) customMsg;
ExchangeActions exchActions = batch.exchangeActions();
if (exchActions != null) {
exchId = exchangeId(n.id(), affinityTopologyVersion(evt), evt);
exchFut = exchangeFuture(exchId, evt, cache, exchActions, null);
}
} else if (customMsg instanceof CacheAffinityChangeMessage) {
CacheAffinityChangeMessage msg = (CacheAffinityChangeMessage) customMsg;
if (msg.exchangeId() == null) {
if (msg.exchangeNeeded()) {
exchId = exchangeId(n.id(), affinityTopologyVersion(evt), evt);
exchFut = exchangeFuture(exchId, evt, cache, null, msg);
}
} else if (msg.exchangeId().topologyVersion().topologyVersion() >= cctx.discovery().localJoinEvent().topologyVersion())
exchangeFuture(msg.exchangeId(), null, null, null, null).onAffinityChangeMessage(evt.eventNode(), msg);
} else if (customMsg instanceof DynamicCacheChangeFailureMessage) {
DynamicCacheChangeFailureMessage msg = (DynamicCacheChangeFailureMessage) customMsg;
if (msg.exchangeId().topologyVersion().topologyVersion() >= affinityTopologyVersion(cctx.discovery().localJoinEvent()).topologyVersion())
exchangeFuture(msg.exchangeId(), null, null, null, null).onDynamicCacheChangeFail(evt.eventNode(), msg);
} else if (customMsg instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage) customMsg).needExchange()) {
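// A snapshot operation that needs an exchange triggers its own partition map exchange future.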
exchId = exchangeId(n.id(), affinityTopologyVersion(evt), evt);
exchFut = exchangeFuture(exchId, evt, null, null, null);
} else if (customMsg instanceof WalStateAbstractMessage && ((WalStateAbstractMessage) customMsg).needExchange()) {
exchId = exchangeId(n.id(), affinityTopologyVersion(evt), evt);
exchFut = exchangeFuture(exchId, evt, null, null, null);
} else {
// Process event as custom discovery task if needed.
CachePartitionExchangeWorkerTask task = cctx.cache().exchangeTaskForCustomDiscoveryMessage(customMsg);
if (task != null)
exchWorker.addCustomTask(task);
}
}
if (exchId != null) {
if (log.isDebugEnabled())
log.debug("Discovery event (will start exchange): " + exchId);
// Event callback: without this callback the future will never complete.
exchFut.onEvent(exchId, evt, cache);
Span span = cctx.kernalContext().tracing().create(EXCHANGE_FUTURE, evt.span());
if (exchId != null) {
GridDhtPartitionExchangeId exchIdf = exchId;
span.addTag(SpanTags.tag(SpanTags.EVENT_NODE, SpanTags.ID), () -> evt.eventNode().id().toString());
span.addTag(SpanTags.tag(SpanTags.EVENT_NODE, SpanTags.CONSISTENT_ID), () -> evt.eventNode().consistentId().toString());
span.addTag(SpanTags.tag(SpanTags.EVENT, SpanTags.TYPE), () -> String.valueOf(evt.type()));
span.addTag(SpanTags.tag(SpanTags.EXCHANGE, SpanTags.ID), () -> String.valueOf(exchIdf.toString()));
span.addTag(SpanTags.tag(SpanTags.INITIAL, SpanTags.TOPOLOGY_VERSION, SpanTags.MAJOR), () -> String.valueOf(exchIdf.topologyVersion().topologyVersion()));
span.addTag(SpanTags.tag(SpanTags.INITIAL, SpanTags.TOPOLOGY_VERSION, SpanTags.MINOR), () -> String.valueOf(exchIdf.topologyVersion().minorTopologyVersion()));
}
span.addTag(SpanTags.NODE_ID, () -> cctx.localNodeId().toString());
span.addLog(() -> "Created");
exchFut.span(span);
// Start exchange process.
addFuture(exchFut);
} else {
if (log.isDebugEnabled())
log.debug("Do not start exchange for discovery event: " + evt);
}
notifyNodeFail(evt);
// Notify indexing engine about node leave so that we can re-map coordinator accordingly.
if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED) {
SecurityContext secCtx = remoteSecurityContext(cctx.kernalContext());
exchWorker.addCustomTask(new SchemaNodeLeaveExchangeWorkerTask(secCtx, evt.eventNode()));
exchWorker.addCustomTask(new WalStateNodeLeaveExchangeTask(secCtx, evt.eventNode()));
}
}
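In onDiscoveryEvent a new exchange future is created for a SnapshotDiscoveryMessage only when the message itself reports that an exchange is needed; the WalStateAbstractMessage branch follows the same needExchange() pattern. A minimal sketch of that filter is shown below; the SnapshotExchangeFilter class and triggersExchange method are hypothetical names, while needExchange() is part of the real SnapshotDiscoveryMessage contract shown above.

import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage;

/** Hypothetical filter mirroring the snapshot branch of onDiscoveryEvent(). */
final class SnapshotExchangeFilter {
    private SnapshotExchangeFilter() {
        // No instances.
    }

    /** @return {@code true} if the custom message is a snapshot operation that itself requires a partition map exchange. */
    static boolean triggersExchange(DiscoveryCustomMessage customMsg) {
        return customMsg instanceof SnapshotDiscoveryMessage
            && ((SnapshotDiscoveryMessage)customMsg).needExchange();
    }
}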
Use of org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method init.
/**
* Starts activity.
*
* @param newCrd {@code True} if the node becomes coordinator on this exchange.
* @throws IgniteInterruptedCheckedException If interrupted.
*/
public void init(boolean newCrd) throws IgniteInterruptedCheckedException {
if (isDone())
return;
assert !cctx.kernalContext().isDaemon();
cctx.exchange().exchangerBlockingSectionBegin();
try {
U.await(evtLatch);
} finally {
cctx.exchange().exchangerBlockingSectionEnd();
}
assert firstDiscoEvt != null : this;
assert exchId.nodeId().equals(firstDiscoEvt.eventNode().id()) : this;
try {
AffinityTopologyVersion topVer = initialVersion();
srvNodes = new ArrayList<>(firstEvtDiscoCache.serverNodes());
remaining.addAll(F.nodeIds(F.view(srvNodes, F.remoteNodes(cctx.localNodeId()))));
crd = srvNodes.isEmpty() ? null : srvNodes.get(0);
boolean crdNode = crd != null && crd.isLocal();
exchCtx = new ExchangeContext(cctx, crdNode, this);
cctx.exchange().exchangerBlockingSectionBegin();
assert state == null : state;
if (crdNode)
state = ExchangeLocalState.CRD;
else
state = cctx.kernalContext().clientNode() ? ExchangeLocalState.CLIENT : ExchangeLocalState.SRV;
initTime = System.currentTimeMillis();
if (exchLog.isInfoEnabled()) {
exchLog.info("Started exchange init [topVer=" + topVer + ", crd=" + crdNode + ", evt=" + IgniteUtils.gridEventName(firstDiscoEvt.type()) + ", evtNode=" + firstDiscoEvt.eventNode().id() + ", customEvt=" + (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT ? ((DiscoveryCustomEvent) firstDiscoEvt).customMessage() : null) + ", allowMerge=" + exchCtx.mergeExchanges() + ", exchangeFreeSwitch=" + exchCtx.exchangeFreeSwitch() + ']');
}
span.addLog(() -> "Exchange parameters initialization");
timeBag.finishGlobalStage("Exchange parameters initialization");
ExchangeType exchange;
if (exchCtx.exchangeFreeSwitch()) {
if (isSnapshotOperation(firstDiscoEvt)) {
// Keep the rebalanced state if the cluster was already rebalanced.
if (wasRebalanced())
markRebalanced();
if (!forceAffReassignment)
cctx.affinity().onCustomMessageNoAffinityChange(this, exchActions);
exchange = cctx.kernalContext().clientNode() ? ExchangeType.NONE : ExchangeType.ALL;
} else
exchange = onExchangeFreeSwitchNodeLeft();
initCoordinatorCaches(newCrd);
} else if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
assert !exchCtx.mergeExchanges();
DiscoveryCustomMessage msg = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage();
forceAffReassignment = DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(msg) && firstEventCache().minimumNodeVersion().compareToIgnoreTimestamp(FORCE_AFF_REASSIGNMENT_SINCE) >= 0;
if (msg instanceof ChangeGlobalStateMessage) {
assert exchActions != null && !exchActions.empty();
exchange = onClusterStateChangeRequest(crdNode);
} else if (msg instanceof DynamicCacheChangeBatch) {
assert exchActions != null && !exchActions.empty();
exchange = onCacheChangeRequest(crdNode);
} else if (msg instanceof SnapshotDiscoveryMessage)
exchange = onCustomMessageNoAffinityChange();
else if (msg instanceof WalStateAbstractMessage)
exchange = onCustomMessageNoAffinityChange();
else {
assert affChangeMsg != null : this;
exchange = onAffinityChangeRequest();
}
if (forceAffReassignment)
cctx.affinity().onCentralizedAffinityChange(this, crdNode);
initCoordinatorCaches(newCrd);
} else {
if (firstDiscoEvt.type() == EVT_NODE_JOINED) {
if (!firstDiscoEvt.eventNode().isLocal()) {
Collection<DynamicCacheDescriptor> receivedCaches = cctx.cache().startReceivedCaches(firstDiscoEvt.eventNode().id(), topVer);
registerCachesFuture = cctx.affinity().initStartedCaches(crdNode, this, receivedCaches);
} else
registerCachesFuture = initCachesOnLocalJoin();
}
initCoordinatorCaches(newCrd);
if (exchCtx.mergeExchanges()) {
if (localJoinExchange()) {
if (cctx.kernalContext().clientNode()) {
onClientNodeEvent();
exchange = ExchangeType.CLIENT;
} else {
onServerNodeEvent(crdNode);
exchange = ExchangeType.ALL;
}
} else {
if (firstDiscoEvt.eventNode().isClient())
exchange = onClientNodeEvent();
else
exchange = cctx.kernalContext().clientNode() ? ExchangeType.CLIENT : ExchangeType.ALL;
}
if (exchId.isLeft())
onLeft();
} else {
exchange = firstDiscoEvt.eventNode().isClient() ? onClientNodeEvent() : onServerNodeEvent(crdNode);
}
}
cctx.cache().registrateProxyRestart(resolveCacheRequests(exchActions), afterLsnrCompleteFut);
exchangeType = exchange;
for (PartitionsExchangeAware comp : cctx.exchange().exchangeAwareComponents()) comp.onInitBeforeTopologyLock(this);
updateTopologies(crdNode);
timeBag.finishGlobalStage("Determine exchange type");
switch(exchange) {
case ALL:
{
distributedExchange();
break;
}
case CLIENT:
{
if (!exchCtx.mergeExchanges() && exchCtx.fetchAffinityOnJoin())
initTopologies();
clientOnlyExchange();
break;
}
case NONE:
{
initTopologies();
synchronized (mux) {
state = ExchangeLocalState.DONE;
}
onDone(topVer);
break;
}
default:
assert false;
}
if (cctx.localNode().isClient()) {
cctx.exchange().exchangerBlockingSectionBegin();
try {
tryToPerformLocalSnapshotOperation();
} finally {
cctx.exchange().exchangerBlockingSectionEnd();
}
}
for (PartitionsExchangeAware comp : cctx.exchange().exchangeAwareComponents()) comp.onInitAfterTopologyLock(this);
// invoked prior to onDoneBeforeTopologyUnlock.
if (exchange == ExchangeType.ALL && context().exchangeFreeSwitch()) {
cctx.exchange().exchangerBlockingSectionBegin();
try {
onDone(initialVersion());
} finally {
cctx.exchange().exchangerBlockingSectionEnd();
}
}
if (exchLog.isInfoEnabled())
exchLog.info("Finished exchange init [topVer=" + topVer + ", crd=" + crdNode + ']');
} catch (IgniteInterruptedCheckedException e) {
assert cctx.kernalContext().isStopping() || cctx.kernalContext().clientDisconnected();
if (cctx.kernalContext().clientDisconnected())
onDone(new IgniteCheckedException("Client disconnected"));
else
onDone(new IgniteCheckedException("Node stopped"));
throw e;
} catch (IgniteNeedReconnectException e) {
onDone(e);
} catch (Throwable e) {
if (reconnectOnError(e))
onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
else {
U.error(log, "Failed to reinitialize local partitions (rebalancing will be stopped): " + exchId, e);
onDone(e);
}
if (e instanceof Error)
throw (Error) e;
}
}
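init() routes SnapshotDiscoveryMessage exchanges to onCustomMessageNoAffinityChange() and also consults isSnapshotOperation(firstDiscoEvt) on the exchange-free switch path. The sketch below is a hypothetical standalone version of such an event-level check; the SnapshotEventCheck class and isSnapshotEvent method are illustrative names, and the real isSnapshotOperation helper in GridDhtPartitionsExchangeFuture may apply additional conditions.

import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.internal.events.DiscoveryCustomEvent;
import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage;

/** Hypothetical standalone version of the snapshot-operation check used in init(). */
final class SnapshotEventCheck {
    private SnapshotEventCheck() {
        // No instances.
    }

    /** @return {@code true} if the discovery event that started the exchange carries a snapshot discovery message. */
    static boolean isSnapshotEvent(DiscoveryEvent evt) {
        return evt.type() == DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT
            && ((DiscoveryCustomEvent)evt).customMessage() instanceof SnapshotDiscoveryMessage;
    }
}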