Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Class GridDhtPartitionSupplyMessage, method finishUnmarshal.
/**
 * {@inheritDoc}
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
@Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    CacheGroupContext grp = ctx.cache().cacheGroup(grpId);

    for (CacheEntryInfoCollection col : infos().values()) {
        List<GridCacheEntryInfo> entries = col.infos();

        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal(grp.cacheObjectContext(), ldr);
    }
}
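The method above resolves the CacheGroupContext for the message's group id and reuses its CacheObjectContext to unmarshal every supplied entry. The same pattern can be read as a small standalone helper; the sketch below is illustrative only: the class SupplyUnmarshalHelper, the method name unmarshalEntries and the null-group guard are assumptions, and the import paths follow the packages these internal classes live in (they may differ between Ignite versions).

import java.util.List;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.CacheObjectContext;
import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;

/** Hypothetical helper: unmarshals supplied entry infos against the group's cache object context. */
final class SupplyUnmarshalHelper {
    /**
     * @param ctx Shared cache context (as passed to finishUnmarshal).
     * @param grpId Cache group id carried by the supply message.
     * @param entries Entry infos from one CacheEntryInfoCollection.
     * @param ldr Class loader used for deserialization.
     */
    static void unmarshalEntries(GridCacheSharedContext ctx, int grpId,
        List<GridCacheEntryInfo> entries, ClassLoader ldr) throws IgniteCheckedException {
        CacheGroupContext grp = ctx.cache().cacheGroup(grpId);

        // Assumption: the group may already be stopped locally, in which case nothing is done.
        if (grp == null)
            return;

        CacheObjectContext coCtx = grp.cacheObjectContext();

        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal(coCtx, ldr);
    }
}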
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Class GridDhtPartitionsExchangeFuture, method processFullMessage.
/**
 * @param checkCrd If {@code true} checks that local node is exchange coordinator.
 * @param node Sender node.
 * @param msg Message.
 */
private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtPartitionsFullMessage msg) {
    try {
        assert exchId.equals(msg.exchangeId()) : msg;
        assert msg.lastVersion() != null : msg;

        if (checkCrd) {
            assert node != null;

            synchronized (mux) {
                if (crd == null) {
                    if (log.isInfoEnabled())
                        log.info("Ignore full message, all server nodes left: " + msg);

                    return;
                }

                switch (state) {
                    case CRD:
                    case BECOME_CRD: {
                        if (log.isInfoEnabled())
                            log.info("Ignore full message, node is coordinator: " + msg);

                        return;
                    }

                    case DONE: {
                        if (log.isInfoEnabled())
                            log.info("Ignore full message, future is done: " + msg);

                        return;
                    }

                    case SRV:
                    case CLIENT: {
                        if (!crd.equals(node)) {
                            if (log.isInfoEnabled()) {
                                log.info("Received full message from non-coordinator [node=" + node.id() + ", nodeOrder=" + node.order() + ", crd=" + crd.id() + ", crdOrder=" + crd.order() + ']');
                            }

                            if (node.order() > crd.order())
                                fullMsgs.put(node, msg);

                            return;
                        }
                        else {
                            AffinityTopologyVersion resVer = msg.resultTopologyVersion() != null ? msg.resultTopologyVersion() : initialVersion();

                            if (log.isInfoEnabled()) {
                                log.info("Received full message, will finish exchange [node=" + node.id() + ", resVer=" + resVer + ']');
                            }

                            finishState = new FinishState(crd.id(), resVer, msg);

                            state = ExchangeLocalState.DONE;

                            break;
                        }
                    }
                }
            }
        }
        else
            assert node == null : node;

        AffinityTopologyVersion resTopVer = initialVersion();

        if (exchCtx.mergeExchanges()) {
            if (msg.resultTopologyVersion() != null && !initialVersion().equals(msg.resultTopologyVersion())) {
                if (log.isInfoEnabled()) {
                    log.info("Received full message, need merge [curFut=" + initialVersion() + ", resVer=" + msg.resultTopologyVersion() + ']');
                }

                resTopVer = msg.resultTopologyVersion();

                if (cctx.exchange().mergeExchanges(this, msg)) {
                    assert cctx.kernalContext().isStopping();

                    // Node is stopping, no need to further process exchange.
                    return;
                }

                if (hasMergedExchanges())
                    updateTopologies(false);

                assert resTopVer.equals(exchCtx.events().topologyVersion()) : "Unexpected result version [" + "msgVer=" + resTopVer + ", locVer=" + exchCtx.events().topologyVersion() + ']';
            }

            exchCtx.events().processEvents(this);

            if (localJoinExchange())
                cctx.affinity().onLocalJoin(this, msg, resTopVer);
            else {
                if (exchCtx.events().hasServerLeft())
                    cctx.affinity().applyAffinityFromFullMessage(this, msg);
                else
                    cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false);

                for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                    if (grp.isLocal() || cacheGroupStopping(grp.groupId()))
                        continue;

                    grp.topology().beforeExchange(this, true, false);
                }
            }
        }
        else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin())
            cctx.affinity().onLocalJoin(this, msg, resTopVer);
        else if (forceAffReassignment)
            cctx.affinity().applyAffinityFromFullMessage(this, msg);

        updatePartitionFullMap(resTopVer, msg);

        IgniteCheckedException err = null;

        if (stateChangeExchange() && !F.isEmpty(msg.getErrorsMap())) {
            err = new IgniteCheckedException("Cluster state change failed");

            cctx.kernalContext().state().onStateChangeError(msg.getErrorsMap(), exchActions.stateChangeRequest());
        }

        onDone(resTopVer, err);
    }
    catch (IgniteCheckedException e) {
        onDone(e);
    }
}
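One detail worth isolating is the SRV/CLIENT branch: a full message from a node other than the known coordinator is not processed, but if the sender's order is higher than the coordinator's it is remembered in fullMsgs so it can be replayed should that node later become coordinator. The sketch below restates just that decision; FullMessageFilter, pendingMsgs and onFullMessage are hypothetical names standing in for the future's own fields and are not part of the Ignite source.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.ignite.cluster.ClusterNode;

/** Hypothetical illustration of the non-coordinator branch of processFullMessage. */
final class FullMessageFilter<M> {
    /** Pending full messages from nodes that may still become coordinator (mirrors fullMsgs). */
    private final Map<ClusterNode, M> pendingMsgs = new ConcurrentHashMap<>();

    /**
     * @param node Sender node.
     * @param crd Currently known coordinator.
     * @param msg Full message.
     * @return {@code true} if the message should be processed now, {@code false} if it is ignored or deferred.
     */
    boolean onFullMessage(ClusterNode node, ClusterNode crd, M msg) {
        if (!crd.equals(node)) {
            // A sender with a higher order may become coordinator after crd fails:
            // keep its message so it can be replayed once the coordinator changes.
            if (node.order() > crd.order())
                pendingMsgs.put(node, msg);

            return false;
        }

        // Message from the coordinator: finish the exchange with it.
        return true;
    }
}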
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Class GridDhtPartitionsExchangeFuture, method detectLostPartitions.
/**
 * Detect lost partitions.
 *
 * @param resTopVer Result topology version.
 */
private void detectLostPartitions(AffinityTopologyVersion resTopVer) {
    boolean detected = false;

    synchronized (cctx.exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            return;

        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                boolean detectedOnGrp = grp.topology().detectLostPartitions(resTopVer, events().lastEvent());

                detected |= detectedOnGrp;
            }
        }
    }

    if (detected)
        cctx.exchange().scheduleResendPartitions();
}
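The loop aggregates per-group results with a bitwise OR and only schedules a partition resend when at least one non-local group actually detected lost partitions. A minimal sketch of that aggregation follows, assuming a hypothetical helper class GroupScan; the predicate-based signature is an illustration, not an Ignite API.

import java.util.function.Predicate;

import org.apache.ignite.internal.processors.cache.CacheGroupContext;

/** Hypothetical helper mirroring the aggregation pattern in detectLostPartitions. */
final class GroupScan {
    /**
     * @param grps Cache groups to scan (e.g. cctx.cache().cacheGroups()).
     * @param check Per-group check; returns {@code true} if the group needs follow-up action.
     * @return {@code true} if any non-local group reported a positive result.
     */
    static boolean anyNonLocal(Iterable<CacheGroupContext> grps, Predicate<CacheGroupContext> check) {
        boolean detected = false;

        for (CacheGroupContext grp : grps) {
            // Local caches have no distributed topology, so the per-group check does not apply to them.
            if (!grp.isLocal())
                detected |= check.test(grp);
        }

        return detected;
    }
}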
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Class GridDhtPartitionsExchangeFuture, method updateTopologies.
/**
 * Updates topology versions and discovery caches on all topologies.
 *
 * @param crd Coordinator flag.
 * @throws IgniteCheckedException If failed.
 */
private void updateTopologies(boolean crd) throws IgniteCheckedException {
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;

        GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());

        long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

        GridDhtPartitionTopology top = grp.topology();

        if (crd) {
            boolean updateTop = exchId.topologyVersion().equals(grp.localStartVersion());

            if (updateTop && clientTop != null) {
                top.update(null, clientTop.partitionMap(true), clientTop.fullUpdateCounters(), Collections.emptySet(), null);
            }
        }

        top.updateTopologyVersion(this, events().discoveryCache(), updSeq, cacheGroupStopping(grp.groupId()));
    }

    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        top.updateTopologyVersion(this, events().discoveryCache(), -1, cacheGroupStopping(top.groupId()));
}
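On the coordinator, the DHT topology is seeded from the cleared client topology only when exchId.topologyVersion() equals grp.localStartVersion(), i.e. the group was started on this very exchange. The sketch below isolates that condition; TopologySeedCheck is a hypothetical name, and the rationale stated in its comment is an interpretation rather than a statement taken from the source.

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

/** Hypothetical illustration of the coordinator-side seeding decision in updateTopologies. */
final class TopologySeedCheck {
    /**
     * The coordinator copies the partition map kept in the client topology into the real DHT
     * topology only when the group was started on this very exchange (interpretation: otherwise
     * the DHT topology already holds its own state and should not be overwritten).
     *
     * @param exchTopVer Topology version of the current exchange (exchId.topologyVersion()).
     * @param grp Cache group.
     * @param hasClientTop Whether a client topology existed and was cleared for this group.
     * @return {@code true} if the DHT topology should be seeded from the client topology.
     */
    static boolean seedFromClientTopology(AffinityTopologyVersion exchTopVer, CacheGroupContext grp,
        boolean hasClientTop) {
        return exchTopVer.equals(grp.localStartVersion()) && hasClientTop;
    }
}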
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
Class GridDhtPartitionsExchangeFuture, method onAllReceived.
/**
 * @param sndResNodes Additional nodes to send finish message to.
 */
private void onAllReceived(@Nullable Collection<ClusterNode> sndResNodes) {
    try {
        assert crd.isLocal();

        assert partHistSuppliers.isEmpty() : partHistSuppliers;

        if (!exchCtx.mergeExchanges() && !crd.equals(events().discoveryCache().serverNodes().get(0))) {
            for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                if (!grp.isLocal())
                    grp.topology().beforeExchange(this, !centralizedAff && !forceAffReassignment, false);
            }
        }

        if (exchCtx.mergeExchanges()) {
            if (log.isInfoEnabled())
                log.info("Coordinator received all messages, try merge [ver=" + initialVersion() + ']');

            boolean finish = cctx.exchange().mergeExchangesOnCoordinator(this);

            // Synchronize in case of changed coordinator (thread switched to sys-*)
            synchronized (mux) {
                if (hasMergedExchanges())
                    updateTopologies(true);
            }

            if (!finish)
                return;
        }

        finishExchangeOnCoordinator(sndResNodes);
    }
    catch (IgniteCheckedException e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else
            onDone(e);
    }
}
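The catch block routes a failure either into an IgniteNeedReconnectException (when reconnectOnError(e) says the client should reconnect) or completes the future with the original error. A minimal sketch of that routing follows, assuming a hypothetical ExchangeErrorRouter; the import path of IgniteNeedReconnectException is taken as org.apache.ignite.internal and may vary by version.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.IgniteNeedReconnectException;

/** Hypothetical illustration of the error routing at the end of onAllReceived. */
final class ExchangeErrorRouter {
    /**
     * Converts a failure on the coordinator path into the object the exchange future is
     * completed with: a reconnect request when the error is recoverable by reconnecting,
     * or the original error otherwise.
     *
     * @param locNode Local node (cctx.localNode()).
     * @param e Error thrown while finishing the exchange.
     * @param reconnect Result of reconnectOnError(e).
     * @return Throwable to pass to onDone().
     */
    static Throwable completionError(ClusterNode locNode, IgniteCheckedException e, boolean reconnect) {
        return reconnect ? new IgniteNeedReconnectException(locNode, e) : e;
    }
}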