Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage in project ignite by apache.
The class GridExchangeFreeSwitchTest, method startPmeMessagesCounting.
/**
* @param nodes Nodes.
* @param singleCnt Counter for GridDhtPartitionsSingleMessage.
* @param fullCnt Counter for GridDhtPartitionsFullMessage.
*/
private void startPmeMessagesCounting(int nodes, AtomicLong singleCnt, AtomicLong fullCnt) {
    for (int i = 0; i < nodes; i++) {
        TestRecordingCommunicationSpi spi =
            (TestRecordingCommunicationSpi)ignite(i).configuration().getCommunicationSpi();

        spi.closure(new IgniteBiInClosure<ClusterNode, Message>() {
            @Override public void apply(ClusterNode node, Message msg) {
                if (msg.getClass().equals(GridDhtPartitionsSingleMessage.class) &&
                    ((GridDhtPartitionsAbstractMessage)msg).exchangeId() != null)
                    singleCnt.incrementAndGet();

                if (msg.getClass().equals(GridDhtPartitionsFullMessage.class) &&
                    ((GridDhtPartitionsAbstractMessage)msg).exchangeId() != null)
                    fullCnt.incrementAndGet();
            }
        });
    }
}
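For context, a minimal usage sketch. The surrounding test body is an assumption (the cluster size and the zero-message assertions merely illustrate the exchange-free-switch scenario this test class checks):

// Hypothetical usage: verify that stopping a node produces no PME messages
// carrying a non-null exchange id (the "exchange-free" switch).
int nodes = 4; // Assumed cluster size; grids 0..3 are assumed already started.

AtomicLong singleCnt = new AtomicLong();
AtomicLong fullCnt = new AtomicLong();

startPmeMessagesCounting(nodes, singleCnt, fullCnt);

stopGrid(nodes - 1); // Trigger the partition map switch.

awaitPartitionMapExchange();

// An exchange-free switch sends no single/full PME messages with an exchange id.
assertEquals(0, singleCnt.get());
assertEquals(0, fullCnt.get());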
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage in project ignite by apache.
The class CacheLateAffinityAssignmentTest, method concurrentStartStaticCaches.
/**
* @param withClients If {@code true} also starts client nodes.
* @throws Exception If failed.
*/
private void concurrentStartStaticCaches(boolean withClients) throws Exception {
    cacheC = new IgniteClosure<String, CacheConfiguration[]>() {
        @Override public CacheConfiguration[] apply(String igniteInstanceName) {
            int caches = getTestIgniteInstanceIndex(igniteInstanceName) + 1;

            CacheConfiguration[] ccfgs = new CacheConfiguration[caches];

            for (int i = 0; i < caches; i++) {
                CacheConfiguration ccfg = cacheConfiguration();

                ccfg.setName("cache-" + i);

                ccfgs[i] = ccfg;
            }

            return ccfgs;
        }
    };

    if (withClients) {
        clientC = new IgniteClosure<String, Boolean>() {
            @Override public Boolean apply(String igniteInstanceName) {
                int idx = getTestIgniteInstanceIndex(igniteInstanceName);

                return idx % 3 == 2;
            }
        };
    }

    final int ITERATIONS = 3;
    final int NODES = withClients ? 8 : 5;

    for (int i = 0; i < ITERATIONS; i++) {
        log.info("Iteration: " + i);

        TestRecordingCommunicationSpi[] testSpis = new TestRecordingCommunicationSpi[NODES];

        for (int j = 0; j < NODES; j++) {
            testSpis[j] = new TestRecordingCommunicationSpi();

            testSpis[j].blockMessages((node, msg) -> msg instanceof GridDhtPartitionsSingleMessage);
        }

        // Ensure exchanges merge.
        spiC = igniteInstanceName -> testSpis[getTestIgniteInstanceIndex(igniteInstanceName)];

        GridTestUtils.runAsync(() -> {
            try {
                for (int j = 1; j < NODES; j++)
                    testSpis[j].waitForBlocked();
            }
            catch (InterruptedException e) {
                log.error("Thread interrupted.", e);
            }

            for (TestRecordingCommunicationSpi testSpi : testSpis)
                testSpi.stopBlock();
        });

        startGridsMultiThreaded(NODES);

        for (int t = 0; t < NODES; t++)
            calculateAffinity(t + 1, true, null);

        if (withClients) {
            skipCheckOrder = true;

            checkAffinity(NODES, topVer(NODES, 0), false);
        }
        else
            checkAffinity(NODES, topVer(NODES, 1), true);

        if (i < ITERATIONS - 1) {
            checkCaches();

            awaitPartitionMapExchange();

            stopAllGrids();

            idealAff.clear();
        }
    }
}
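This helper is presumably driven by two test entry points, one per withClients value; a sketch under that assumption (the test method names are illustrative):

/** Concurrent start of server nodes with statically configured caches. */
@Test
public void testConcurrentStartStaticCaches() throws Exception {
    concurrentStartStaticCaches(false);
}

/** Same scenario, but every third node is started as a client. */
@Test
public void testConcurrentStartStaticCachesWithClients() throws Exception {
    concurrentStartStaticCaches(true);
}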
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsSingleMessage.
/**
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if partition update counters should be sent.
 * @param newCntrMap {@code True} if possible to use {@link CachePartitionPartialCountersMap}.
 * @param exchActions Exchange actions; used to skip cache groups that are being stopped. May be {@code null}.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(
    @Nullable GridDhtPartitionExchangeId exchangeId,
    boolean clientOnlyExchange,
    boolean sndCounters,
    boolean newCntrMap,
    ExchangeActions exchActions
) {
    GridDhtPartitionsSingleMessage m =
        new GridDhtPartitionsSingleMessage(exchangeId, clientOnlyExchange, cctx.versions().last(), true);

    Map<Object, T2<Integer, GridPartitionStateMap>> dupData = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (!grp.isLocal() && (exchActions == null || !exchActions.cacheGroupStopping(grp.groupId()))) {
            GridDhtPartitionMap locMap = grp.topology().localPartitionMap();

            addPartitionMap(m, dupData, true, grp.groupId(), locMap, grp.affinity().similarAffinityKey());

            if (sndCounters) {
                CachePartitionPartialCountersMap cntrsMap = grp.topology().localUpdateCounters(true);

                m.addPartitionUpdateCounters(grp.groupId(),
                    newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
            }
        }
    }

    for (GridClientPartitionTopology top : clientTops.values()) {
        if (m.partitions() != null && m.partitions().containsKey(top.groupId()))
            continue;

        GridDhtPartitionMap locMap = top.localPartitionMap();

        addPartitionMap(m, dupData, true, top.groupId(), locMap, top.similarAffinityKey());

        if (sndCounters) {
            CachePartitionPartialCountersMap cntrsMap = top.localUpdateCounters(true);

            m.addPartitionUpdateCounters(top.groupId(),
                newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
        }
    }

    return m;
}
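A hedged sketch of a call site. The coordinator variable, the flag values, and the send call are assumptions for illustration, not the method's actual caller:

// Hypothetical call site: build a single message with update counters for the
// given exchange and send it to the coordinator node.
GridDhtPartitionsSingleMessage msg = cctx.exchange().createPartitionsSingleMessage(
    exchangeId, // Exchange this message answers; null would mean a plain partition state refresh.
    false,      // Not a client-only exchange.
    true,       // Attach partition update counters.
    true,       // Receiver understands CachePartitionPartialCountersMap.
    null);      // No cache groups are being stopped.

cctx.io().send(crd, msg, GridIoPolicy.SYSTEM_POOL);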
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method mergeExchanges.
/**
* @param curFut Current exchange future.
* @param msg Message.
* @return {@code True} if node is stopping.
* @throws IgniteInterruptedCheckedException If interrupted.
*/
public boolean mergeExchanges(final GridDhtPartitionsExchangeFuture curFut, GridDhtPartitionsFullMessage msg)
    throws IgniteInterruptedCheckedException {
    AffinityTopologyVersion resVer = msg.resultTopologyVersion();

    if (exchWorker.waitForExchangeFuture(resVer))
        return true;

    for (CachePartitionExchangeWorkerTask task : exchWorker.futQ) {
        if (task instanceof GridDhtPartitionsExchangeFuture) {
            GridDhtPartitionsExchangeFuture fut = (GridDhtPartitionsExchangeFuture)task;

            if (fut.initialVersion().compareTo(resVer) > 0) {
                if (log.isInfoEnabled()) {
                    log.info("Merge exchange future on finish stop [curFut=" + curFut.initialVersion() +
                        ", resVer=" + resVer +
                        ", nextFutVer=" + fut.initialVersion() + ']');
                }

                break;
            }

            if (log.isInfoEnabled()) {
                log.info("Merge exchange future on finish [curFut=" + curFut.initialVersion() +
                    ", mergedFut=" + fut.initialVersion() +
                    ", evt=" + IgniteUtils.gridEventName(fut.firstEvent().type()) +
                    ", evtNode=" + fut.firstEvent().eventNode().id() +
                    ", evtNodeClient=" + fut.firstEvent().eventNode().isClient() + ']');
            }

            DiscoveryEvent evt = fut.firstEvent();

            curFut.context().events().addEvent(fut.initialVersion(), fut.firstEvent(), fut.firstEventCache());

            if (evt.type() == EVT_NODE_JOINED) {
                final GridDhtPartitionsSingleMessage pendingMsg = fut.mergeJoinExchangeOnDone(curFut);

                if (pendingMsg != null) {
                    if (log.isInfoEnabled()) {
                        log.info("Merged join exchange future on finish, will reply to node [" +
                            "curFut=" + curFut.initialVersion() +
                            ", mergedFut=" + fut.initialVersion() +
                            ", evtNode=" + evt.eventNode().id() + ']');
                    }

                    curFut.waitAndReplyToNode(evt.eventNode().id(), pendingMsg);
                }
            }
        }
    }

    ExchangeDiscoveryEvents evts = curFut.context().events();

    assert evts.topologyVersion().equals(resVer) : "Invalid exchange merge result [ver=" + evts.topologyVersion() +
        ", expVer=" + resVer + ']';

    return false;
}
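A hedged sketch of how a caller might consume the boolean result (the surrounding code is an assumption):

// Hypothetical caller: merge pending exchange futures into the current one
// before finishing it with the computed full message.
if (cctx.exchange().mergeExchanges(curFut, fullMsg))
    return; // Node is stopping; abandon further exchange processing.

// From here on, curFut's discovery-event context covers every merged exchange
// up to fullMsg.resultTopologyVersion(), as the final assert above guarantees.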
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method start0.
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    super.start0();

    exchWorker = new ExchangeWorker();

    latchMgr = new ExchangeLatchManager(cctx.kernalContext());

    cctx.gridEvents().addDiscoveryEventListener(discoLsnr, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED,
        EVT_DISCOVERY_CUSTOM_EVT);

    cctx.io().addCacheHandler(0, GridDhtPartitionsSingleMessage.class,
        new MessageHandler<GridDhtPartitionsSingleMessage>() {
            @Override public void onMessage(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
                GridDhtPartitionExchangeId exchangeId = msg.exchangeId();

                if (exchangeId != null) {
                    GridDhtPartitionsExchangeFuture fut = exchangeFuture(exchangeId);

                    boolean fastReplied = fut.fastReplyOnSingleMessage(node, msg);

                    if (fastReplied) {
                        if (log.isInfoEnabled())
                            log.info("Fast replied to single message [exchId=" + exchangeId +
                                ", nodeId=" + node.id() + "]");

                        return;
                    }
                }
                else {
                    GridDhtPartitionsExchangeFuture cur = lastTopologyFuture();

                    if (!cur.isDone() && cur.changedAffinity() && !msg.restoreState()) {
                        cur.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                                if (fut.error() == null)
                                    processSinglePartitionUpdate(node, msg);
                            }
                        });

                        return;
                    }
                }

                processSinglePartitionUpdate(node, msg);
            }
        });

    cctx.io().addCacheHandler(0, GridDhtPartitionsFullMessage.class,
        new MessageHandler<GridDhtPartitionsFullMessage>() {
            @Override public void onMessage(ClusterNode node, GridDhtPartitionsFullMessage msg) {
                if (msg.exchangeId() == null) {
                    GridDhtPartitionsExchangeFuture currentExchange = lastTopologyFuture();

                    if (currentExchange != null && currentExchange.addOrMergeDelayedFullMessage(node, msg)) {
                        if (log.isInfoEnabled()) {
                            log.info("Delay process full message without exchange id " +
                                "(there is exchange in progress) [nodeId=" + node.id() + "]");
                        }

                        return;
                    }
                }

                processFullPartitionUpdate(node, msg);
            }
        });

    cctx.io().addCacheHandler(0, GridDhtPartitionsSingleRequest.class,
        new MessageHandler<GridDhtPartitionsSingleRequest>() {
            @Override public void onMessage(ClusterNode node, GridDhtPartitionsSingleRequest msg) {
                processSinglePartitionRequest(node, msg);
            }
        });

    if (!cctx.kernalContext().clientNode()) {
        for (int cnt = 0; cnt < cctx.gridConfig().getRebalanceThreadPoolSize(); cnt++) {
            final int idx = cnt;

            cctx.io().addOrderedCacheGroupHandler(cctx, rebalanceTopic(cnt), new CI2<UUID, GridCacheGroupIdMessage>() {
                @Override public void apply(final UUID id, final GridCacheGroupIdMessage m) {
                    if (!enterBusy())
                        return;

                    try {
                        CacheGroupContext grp = cctx.cache().cacheGroup(m.groupId());

                        if (grp != null) {
                            if (m instanceof GridDhtPartitionSupplyMessage) {
                                grp.preloader().handleSupplyMessage(id, (GridDhtPartitionSupplyMessage)m);

                                return;
                            }
                            else if (m instanceof GridDhtPartitionDemandMessage) {
                                grp.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage)m);

                                return;
                            }
                            else if (m instanceof GridDhtPartitionDemandLegacyMessage) {
                                grp.preloader().handleDemandMessage(idx, id,
                                    new GridDhtPartitionDemandMessage((GridDhtPartitionDemandLegacyMessage)m));

                                return;
                            }
                            else
                                U.error(log, "Unsupported message type: " + m.getClass().getName());
                        }

                        U.warn(log, "Cache group with id=" + m.groupId() + " is stopped or absent");
                    }
                    finally {
                        leaveBusy();
                    }
                }
            });
        }
    }

    MetricRegistry mreg = cctx.kernalContext().metric().registry(PME_METRICS);

    mreg.register(PME_DURATION, () -> currentPMEDuration(false), "Current PME duration in milliseconds.");

    mreg.register(PME_OPS_BLOCKED_DURATION, () -> currentPMEDuration(true),
        "Current PME cache operations blocked duration in milliseconds.");

    durationHistogram = mreg.findMetric(PME_DURATION_HISTOGRAM);

    blockingDurationHistogram = mreg.findMetric(PME_OPS_BLOCKED_DURATION_HISTOGRAM);

    MetricRegistry clusterReg = cctx.kernalContext().metric().registry(CLUSTER_METRICS);

    rebalanced = clusterReg.booleanMetric(REBALANCED,
        "True if the cluster has fully achieved rebalanced state. Note that an inactive cluster always has " +
        "this metric in False regardless of the real partitions state.");

    startLatch.countDown();
}
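A hedged sketch of reading back the PME gauges registered above. The metric names reuse the constants from this class; treating the gauges as LongMetric is an assumption:

// Hypothetical read-side: poll the PME gauges registered in start0().
MetricRegistry pmeReg = cctx.kernalContext().metric().registry(PME_METRICS);

LongMetric duration = pmeReg.findMetric(PME_DURATION);
LongMetric blocked = pmeReg.findMetric(PME_OPS_BLOCKED_DURATION);

if (duration != null && blocked != null) {
    log.info("PME running for " + duration.value() + " ms, " +
        "cache operations blocked for " + blocked.value() + " ms");
}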