Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionExchangeId in project ignite by apache.
The class GridCachePartitionExchangeManager, method start0().
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    super.start0();

    exchWorker = new ExchangeWorker();

    latchMgr = new ExchangeLatchManager(cctx.kernalContext());

    cctx.gridEvents().addDiscoveryEventListener(discoLsnr,
        EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED, EVT_DISCOVERY_CUSTOM_EVT);

    // Handler for per-node (single) partition map messages.
    cctx.io().addCacheHandler(0, GridDhtPartitionsSingleMessage.class,
        new MessageHandler<GridDhtPartitionsSingleMessage>() {
            @Override public void onMessage(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
                GridDhtPartitionExchangeId exchangeId = msg.exchangeId();

                if (exchangeId != null) {
                    GridDhtPartitionsExchangeFuture fut = exchangeFuture(exchangeId);

                    boolean fastReplied = fut.fastReplyOnSingleMessage(node, msg);

                    if (fastReplied) {
                        if (log.isInfoEnabled())
                            log.info("Fast replied to single message [exchId=" + exchangeId +
                                ", nodeId=" + node.id() + "]");

                        return;
                    }
                }
                else {
                    GridDhtPartitionsExchangeFuture cur = lastTopologyFuture();

                    // Defer processing until the in-progress exchange completes without error.
                    if (!cur.isDone() && cur.changedAffinity() && !msg.restoreState()) {
                        cur.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                                if (fut.error() == null)
                                    processSinglePartitionUpdate(node, msg);
                            }
                        });

                        return;
                    }
                }

                processSinglePartitionUpdate(node, msg);
            }
        });

    // Handler for full partition map messages.
    cctx.io().addCacheHandler(0, GridDhtPartitionsFullMessage.class,
        new MessageHandler<GridDhtPartitionsFullMessage>() {
            @Override public void onMessage(ClusterNode node, GridDhtPartitionsFullMessage msg) {
                if (msg.exchangeId() == null) {
                    GridDhtPartitionsExchangeFuture currentExchange = lastTopologyFuture();

                    if (currentExchange != null && currentExchange.addOrMergeDelayedFullMessage(node, msg)) {
                        if (log.isInfoEnabled()) {
                            log.info("Delay process full message without exchange id " +
                                "(there is exchange in progress) [nodeId=" + node.id() + "]");
                        }

                        return;
                    }
                }

                processFullPartitionUpdate(node, msg);
            }
        });

    cctx.io().addCacheHandler(0, GridDhtPartitionsSingleRequest.class,
        new MessageHandler<GridDhtPartitionsSingleRequest>() {
            @Override public void onMessage(ClusterNode node, GridDhtPartitionsSingleRequest msg) {
                processSinglePartitionRequest(node, msg);
            }
        });

    // On server nodes, register one ordered handler per rebalance thread for
    // supply/demand messages.
    if (!cctx.kernalContext().clientNode()) {
        for (int cnt = 0; cnt < cctx.gridConfig().getRebalanceThreadPoolSize(); cnt++) {
            final int idx = cnt;

            cctx.io().addOrderedCacheGroupHandler(cctx, rebalanceTopic(cnt), new CI2<UUID, GridCacheGroupIdMessage>() {
                @Override public void apply(final UUID id, final GridCacheGroupIdMessage m) {
                    if (!enterBusy())
                        return;

                    try {
                        CacheGroupContext grp = cctx.cache().cacheGroup(m.groupId());

                        if (grp != null) {
                            if (m instanceof GridDhtPartitionSupplyMessage) {
                                grp.preloader().handleSupplyMessage(id, (GridDhtPartitionSupplyMessage)m);

                                return;
                            }
                            else if (m instanceof GridDhtPartitionDemandMessage) {
                                grp.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage)m);

                                return;
                            }
                            else if (m instanceof GridDhtPartitionDemandLegacyMessage) {
                                grp.preloader().handleDemandMessage(idx, id,
                                    new GridDhtPartitionDemandMessage((GridDhtPartitionDemandLegacyMessage)m));

                                return;
                            }
                            else
                                U.error(log, "Unsupported message type: " + m.getClass().getName());
                        }

                        U.warn(log, "Cache group with id=" + m.groupId() + " is stopped or absent");
                    }
                    finally {
                        leaveBusy();
                    }
                }
            });
        }
    }

    // Register partition map exchange (PME) metrics.
    MetricRegistry mreg = cctx.kernalContext().metric().registry(PME_METRICS);

    mreg.register(PME_DURATION, () -> currentPMEDuration(false), "Current PME duration in milliseconds.");
    mreg.register(PME_OPS_BLOCKED_DURATION, () -> currentPMEDuration(true),
        "Current PME cache operations blocked duration in milliseconds.");

    durationHistogram = mreg.findMetric(PME_DURATION_HISTOGRAM);
    blockingDurationHistogram = mreg.findMetric(PME_OPS_BLOCKED_DURATION_HISTOGRAM);

    MetricRegistry clusterReg = cctx.kernalContext().metric().registry(CLUSTER_METRICS);

    rebalanced = clusterReg.booleanMetric(REBALANCED,
        "True if the cluster has fully achieved rebalanced state. Note that an inactive cluster always has " +
        "this metric in False regardless of the real partitions state.");

    startLatch.countDown();
}
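The interesting behavior in start0() is the defer-and-replay handling: a message that arrives while an exchange is still running is either answered immediately (fastReplyOnSingleMessage) or parked until the current exchange future completes. The following is a minimal, self-contained sketch of that register-and-defer pattern, assuming a single deferred queue and exact-type handler lookup; all names here (ExchangeAwareDispatcher, addHandler, onExchangeDone) are hypothetical and not Ignite API.

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.function.Consumer;

// Hypothetical, simplified model of the dispatch pattern; not Ignite API.
final class ExchangeAwareDispatcher {
    private final Map<Class<?>, Consumer<Object>> handlers = new HashMap<>();
    private final Queue<Object> deferred = new ArrayDeque<>();
    private boolean exchangeInProgress;

    /** Registers a handler for one message type, like addCacheHandler above. */
    <T> void addHandler(Class<T> type, Consumer<T> h) {
        handlers.put(type, msg -> h.accept(type.cast(msg)));
    }

    /** Dispatches a message, deferring it while an exchange is running. */
    void onMessage(Object msg) {
        if (exchangeInProgress) {
            deferred.add(msg); // analogous to addOrMergeDelayedFullMessage

            return;
        }

        Consumer<Object> h = handlers.get(msg.getClass());

        if (h != null)
            h.accept(msg);
    }

    void onExchangeStarted() {
        exchangeInProgress = true;
    }

    /** Called when the exchange future completes without error: replay deferred messages in order. */
    void onExchangeDone() {
        exchangeInProgress = false;

        Object msg;

        while ((msg = deferred.poll()) != null)
            onMessage(msg);
    }
}

In the real manager the deferral is more selective than this sketch: only GridDhtPartitionsFullMessage instances without an exchange id are merged via addOrMergeDelayedFullMessage, while single messages are re-dispatched through a listener attached to the topology future.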
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionExchangeId in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method initPartitions().
/**
 * Creates and initializes partitions using the given {@code affVer} and {@code affAssignment}.
 *
 * @param affVer Affinity version to use.
 * @param affAssignment Affinity assignment to use.
 * @param exchFut Exchange future.
 * @param updateSeq Update sequence.
 * @return {@code True} if partitions must be refreshed.
 */
private boolean initPartitions(
    AffinityTopologyVersion affVer,
    List<List<ClusterNode>> affAssignment,
    GridDhtPartitionsExchangeFuture exchFut,
    long updateSeq
) {
    boolean needRefresh = false;

    if (grp.affinityNode()) {
        ClusterNode loc = ctx.localNode();

        ClusterNode oldest = discoCache.oldestAliveServerNode();

        GridDhtPartitionExchangeId exchId = exchFut.exchangeId();

        int partitions = grp.affinity().partitions();

        if (grp.rebalanceEnabled()) {
            boolean added = exchFut.cacheGroupAddedOnExchange(grp.groupId(), grp.receivedFrom());

            boolean first = added ||
                (loc.equals(oldest) && loc.id().equals(exchId.nodeId()) && exchId.isJoined()) ||
                exchFut.activateCluster();

            if (first) {
                assert exchId.isJoined() || added || exchFut.activateCluster();

                if (log.isDebugEnabled()) {
                    String reason;

                    if (exchId.isJoined())
                        reason = "First node in cluster";
                    else if (added)
                        reason = "Cache group added";
                    else
                        reason = "Cluster activate";

                    log.debug("Initialize partitions (" + reason + ") [grp=" + grp.cacheOrGroupName() + "]");
                }

                for (int p = 0; p < partitions; p++) {
                    if (localNode(p, affAssignment)) {
                        // Partition is created for the first time, so it's safe to own it.
                        boolean shouldOwn = locParts.get(p) == null;

                        GridDhtLocalPartition locPart = getOrCreatePartition(p);

                        if (shouldOwn) {
                            locPart.own();

                            if (log.isDebugEnabled())
                                log.debug("Partition has been owned (created first time) " +
                                    "[grp=" + grp.cacheOrGroupName() + ", p=" + locPart.id() + ']');
                        }

                        needRefresh = true;

                        updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
                    }
                    else {
                        // Apply partitions not belonging to this node by affinity to the partition map.
                        GridDhtLocalPartition locPart = locParts.get(p);

                        if (locPart != null) {
                            needRefresh = true;

                            updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
                        }
                    }
                }
            }
            else
                createPartitions(affVer, affAssignment, updateSeq);
        }
        else {
            // If the preloader is disabled, simply clear out
            // the partitions this node is not responsible for.
            for (int p = 0; p < partitions; p++) {
                GridDhtLocalPartition locPart = localPartition0(p, affVer, false, true);

                boolean belongs = localNode(p, affAssignment);

                if (locPart != null) {
                    if (!belongs) {
                        GridDhtPartitionState state = locPart.state();

                        if (state.active()) {
                            locPart.rent();

                            updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);

                            if (log.isDebugEnabled()) {
                                log.debug("Evicting partition with rebalancing disabled (it does not belong to " +
                                    "affinity) [grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']');
                            }
                        }
                    }
                    else {
                        locPart.own();

                        // Make sure partition map is initialized.
                        updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
                    }
                }
                else if (belongs) {
                    locPart = getOrCreatePartition(p);

                    locPart.own();

                    updateLocal(p, locPart.state(), updateSeq, affVer);
                }
            }
        }
    }

    updateRebalanceVersion(affVer, affAssignment);

    return needRefresh;
}
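The per-partition decision above boils down to: a partition the affinity assigns to the local node is created and owned (immediately on the first exchange for the group, or when rebalancing is disabled), while an active partition that is no longer assigned is rented, i.e. scheduled for eviction. A condensed standalone sketch of that state transition, using hypothetical names (PartState, decide) rather than Ignite's GridDhtPartitionState:

import java.util.List;

// Reduced state set; Ignite's GridDhtPartitionState also has MOVING, LOST, etc.
enum PartState { ABSENT, OWNING, RENTING }

final class PartitionDecision {
    /**
     * @param locNodeId Local node id.
     * @param p Partition number.
     * @param affAssignment affAssignment.get(p) lists the node ids assigned to partition p.
     * @param cur Current local state of partition p.
     * @return Next local state of partition p.
     */
    static PartState decide(String locNodeId, int p, List<List<String>> affAssignment, PartState cur) {
        boolean belongs = affAssignment.get(p).contains(locNodeId);

        if (belongs)
            return PartState.OWNING; // create the partition if absent, then own it

        // No longer assigned: evict an active partition, leave an absent one alone.
        return cur == PartState.OWNING ? PartState.RENTING : cur;
    }
}

This sketch deliberately omits the rebalancing-enabled, non-first-exchange path, where createPartitions() creates the assigned partitions without owning them outright, presumably so that rebalancing can fill them in.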