Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
The class CacheLateAffinityAssignmentTest, method doTestCoordLeaveBlockedFinishExchangeMessage.
/**
 * Coordinator leaves without sending all {@link GridDhtPartitionsFullMessage} messages;
 * the exchange must still complete.
 *
 * @param cnt Number of nodes.
 * @param stopId Node to stop.
 * @param lastClient {@code True} if the last started node is a client.
 * @param blockedIds Nodes not receiving the exchange finish message.
 * @throws Exception If failed.
 */
private void doTestCoordLeaveBlockedFinishExchangeMessage(int cnt, int stopId, boolean lastClient, int... blockedIds) throws Exception {
    int ord = 1;

    for (int i = 0; i < cnt; i++) {
        if (i == cnt - 1 && lastClient)
            startClient(ord - 1, ord++);
        else
            startServer(ord - 1, ord++);
    }

    awaitPartitionMapExchange();

    TestRecordingCommunicationSpi spi0 = TestRecordingCommunicationSpi.spi(grid(0));

    final Set<String> blocked = new HashSet<>();

    for (int id : blockedIds) {
        String name = grid(id).name();

        blocked.add(name);
    }

    // Block exchange finish messages to the selected nodes (a non-null exchange ID
    // distinguishes them from periodic partition map refreshes).
    spi0.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message msg) {
            return blocked.contains(node.attribute(IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME)) &&
                (msg instanceof GridDhtPartitionsFullMessage) &&
                (((GridDhtPartitionsFullMessage)msg).exchangeId() != null);
        }
    });

    AffinityTopologyVersion currentTop = ignite(0).context().cache().context().exchange().readyAffinityVersion();

    checkAffinity(cnt, currentTop, true);

    stopNode(stopId, ord);

    AffinityTopologyVersion topVer = topVer(ord, 0);

    List<IgniteInternalFuture<?>> futs = new ArrayList<>(cnt);

    List<Ignite> grids = G.allGrids();

    for (Ignite ignite : grids)
        futs.add(affinityReadyFuture(topVer, ignite));

    assertEquals(futs.size(), grids.size());

    for (int i = 0; i < futs.size(); i++) {
        final IgniteInternalFuture<?> fut = futs.get(i);

        Ignite ignite = grids.get(i);

        if (!blocked.contains(ignite.name())) {
            GridTestUtils.waitForCondition(new GridAbsPredicate() {
                @Override public boolean apply() {
                    return fut.isDone();
                }
            }, 5000);

            assertTrue(ignite.name(), fut.isDone());
        }
        else
            assertFalse(ignite.name(), fut.isDone());
    }

    ord++;

    // Stop the old coordinator, which holds the blocked messages; this triggers exchange completion from the new coordinator.
    stopNode(0, ord);

    checkAffinity(cnt - 2, topVer(ord - 1, 0), true, false);

    checkAffinity(cnt - 2, topVer(ord, 0), true);

    awaitPartitionMapExchange();
}
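The anonymous IgniteBiPredicate above can equivalently be written as a lambda, which is the style the next snippet uses. A minimal sketch assuming the same spi0 and blocked set (full messages with a null exchange ID are periodic partition map refreshes and are deliberately let through):

spi0.blockMessages((node, msg) ->
    blocked.contains(node.attribute(IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME)) &&
    msg instanceof GridDhtPartitionsFullMessage &&
    ((GridDhtPartitionsFullMessage)msg).exchangeId() != null);

Note that the test never calls spi0.stopBlock(): the blocked messages are discarded when node 0 is stopped, which is exactly what forces the new coordinator to finish the exchange.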
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
The class TxRollbackOnTimeoutTest, method doTestRollbackOnTimeoutTxRemap.
/**
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @param clientWait {@code True} to wait for the remap on the client, otherwise wait for the remap on the server.
 * @throws Exception If failed.
 */
private void doTestRollbackOnTimeoutTxRemap(TransactionConcurrency concurrency, TransactionIsolation isolation, boolean clientWait) throws Exception {
    IgniteEx client = (IgniteEx)startClient();

    Ignite crd = grid(0);

    assertTrue(crd.cluster().localNode().order() == 1);

    List<Integer> keys = movingKeysAfterJoin(grid(1), CACHE_NAME, 1);

    // Delay exchange finish on server nodes if clientWait=true, or on all nodes otherwise (excluding the joining node).
    spi(crd).blockMessages((node, msg) -> node.order() < 5 && msg instanceof GridDhtPartitionsFullMessage &&
        (!clientWait || node.order() != grid(1).cluster().localNode().order()));

    // Delay prepare until the exchange is finished.
    spi(client).blockMessages((node, msg) -> {
        boolean block = false;

        if (concurrency == PESSIMISTIC) {
            if (msg instanceof GridNearLockRequest) {
                block = true;

                assertEquals(GRID_CNT + 1, ((GridNearLockRequest)msg).topologyVersion().topologyVersion());
            }
        }
        else {
            if (msg instanceof GridNearTxPrepareRequest) {
                block = true;

                assertEquals(GRID_CNT + 1, ((GridNearTxPrepareRequest)msg).topologyVersion().topologyVersion());
            }
        }

        return block;
    });

    // Plan: start a tx that maps on topVer = GRID_CNT + 1, delay the map until the exchange, then start a new node.
    IgniteInternalFuture fut0 = runAsync(new Runnable() {
        @Override public void run() {
            try (Transaction tx = client.transactions().txStart(concurrency, isolation, 5000, 1)) {
                client.cache(CACHE_NAME).put(keys.get(0), 0);

                tx.commit();

                fail();
            }
            catch (Exception e) {
                assertTrue(X.hasCause(e, TransactionTimeoutException.class));
            }
        }
    });

    IgniteInternalFuture fut1 = runAsync(new Runnable() {
        @Override public void run() {
            try {
                // TX is trying to prepare on the previous topology version.
                spi(client).waitForBlocked();

                startGrid(GRID_CNT);
            }
            catch (Exception e) {
                fail(e.getMessage());
            }
        }
    });

    IgniteInternalFuture fut2 = runAsync(new Runnable() {
        @Override public void run() {
            try {
                // Wait for all full messages to be ready.
                spi(crd).waitForBlocked(GRID_CNT + (clientWait ? 0 : 1));

                // Trigger the remap.
                spi(client).stopBlock();
            }
            catch (Exception e) {
                fail(e.getMessage());
            }
        }
    });

    fut0.get(30_000);
    fut1.get(30_000);
    fut2.get(30_000);

    spi(crd).stopBlock();

    // FIXME: awaitPartitionMapExchange sometimes fails here while waiting for owners, so wait on the affinity ready future instead.
    IgniteInternalFuture<?> topFut = client.context().cache().context().exchange()
        .affinityReadyFuture(new AffinityTopologyVersion(GRID_CNT + 2, 1));

    assertNotNull(topFut);

    topFut.get(10_000);

    checkFutures();
}
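The awaited version is not arbitrary. A sketch of the arithmetic, assuming GRID_CNT server nodes are started before the test body (anything beyond what the snippet itself shows is an assumption):

// GRID_CNT servers                          -> major version GRID_CNT
// + 1 client joined via startClient()       -> major version GRID_CNT + 1 (the version the tx maps on, per the asserts above)
// + 1 server joined via startGrid(GRID_CNT) -> major version GRID_CNT + 2
// Late affinity assignment bumps the minor part, hence (GRID_CNT + 2, 1).
AffinityTopologyVersion expVer = new AffinityTopologyVersion(GRID_CNT + 2, 1);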
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsFullMessage.
/**
 * @param nodes Target nodes.
 * @param exchId Non-null exchange ID if the message is created for an exchange.
 * @param lastVer Last version.
 * @param compress {@code True} if compression may be used for the message.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(Collection<ClusterNode> nodes,
    @Nullable final GridDhtPartitionExchangeId exchId,
    @Nullable GridCacheVersion lastVer,
    final boolean compress) {
    final GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(exchId,
        lastVer,
        exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE);

    m.compress(compress);

    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData = new HashMap<>();

    cctx.forAllCaches(new IgniteInClosure<GridCacheContext>() {
        @Override public void apply(GridCacheContext cacheCtx) {
            if (!cacheCtx.isLocal()) {
                boolean ready;

                if (exchId != null) {
                    AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

                    // Include a cache only if it was started no later than the exchange topology version.
                    ready = startTopVer.compareTo(exchId.topologyVersion()) <= 0;
                }
                else
                    ready = cacheCtx.started();

                if (ready) {
                    GridAffinityAssignmentCache affCache = cacheCtx.affinity().affinityCache();

                    GridDhtPartitionFullMap locMap = cacheCtx.topology().partitionMap(true);

                    addFullPartitionsMap(m, dupData, compress, cacheCtx.cacheId(), locMap, affCache.similarAffinityKey());

                    if (exchId != null)
                        m.addPartitionUpdateCounters(cacheCtx.cacheId(), cacheCtx.topology().updateCounters(true));
                }
            }
        }
    });

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap map = top.partitionMap(true);

        addFullPartitionsMap(m, dupData, compress, top.cacheId(), map, top.similarAffinityKey());

        if (exchId != null)
            m.addPartitionUpdateCounters(top.cacheId(), top.updateCounters(true));
    }

    return m;
}
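A hypothetical call-site sketch (the argument values are illustrative; note that the nodes parameter is not referenced in the body shown above). With a null exchId the message carries AffinityTopologyVersion.NONE and no update counters, which matches the periodic partition map refresh case:

GridDhtPartitionsFullMessage m = cctx.exchange().createPartitionsFullMessage(
    cctx.discovery().remoteNodes(), // target nodes
    null,                           // exchId: null outside of an exchange
    null,                           // lastVer
    false);                         // compress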
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsFullMessage (cache-group overload).
/**
 * @param compress {@code True} if it is possible to compress the message (works properly only if prepareMarshall/
 *      finishUnmarshall methods are called).
 * @param newCntrMap {@code True} if it is possible to use {@link CachePartitionFullCountersMap}.
 * @param exchId Non-null exchange ID if the message is created for an exchange.
 * @param lastVer Last version.
 * @param partHistSuppliers Partition history suppliers map.
 * @param partsToReload Partitions to reload map.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(boolean compress,
    boolean newCntrMap,
    @Nullable final GridDhtPartitionExchangeId exchId,
    @Nullable GridCacheVersion lastVer,
    @Nullable IgniteDhtPartitionHistorySuppliersMap partHistSuppliers,
    @Nullable IgniteDhtPartitionsToReloadMap partsToReload) {
    final GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(exchId,
        lastVer,
        exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE,
        partHistSuppliers,
        partsToReload);

    m.compress(compress);

    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (!grp.isLocal()) {
            if (exchId != null) {
                AffinityTopologyVersion startTopVer = grp.localStartVersion();

                // Skip groups started after the exchange topology version.
                if (startTopVer.compareTo(exchId.topologyVersion()) > 0)
                    continue;
            }

            GridAffinityAssignmentCache affCache = grp.affinity();

            GridDhtPartitionFullMap locMap = grp.topology().partitionMap(true);

            if (locMap != null)
                addFullPartitionsMap(m, dupData, compress, grp.groupId(), locMap, affCache.similarAffinityKey());

            if (exchId != null) {
                CachePartitionFullCountersMap cntrsMap = grp.topology().fullUpdateCounters();

                if (newCntrMap)
                    m.addPartitionUpdateCounters(grp.groupId(), cntrsMap);
                else
                    m.addPartitionUpdateCounters(grp.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));
            }
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap map = top.partitionMap(true);

        if (map != null)
            addFullPartitionsMap(m, dupData, compress, top.groupId(), map, top.similarAffinityKey());

        if (exchId != null) {
            CachePartitionFullCountersMap cntrsMap = top.fullUpdateCounters();

            if (newCntrMap)
                m.addPartitionUpdateCounters(top.groupId(), cntrsMap);
            else
                m.addPartitionUpdateCounters(top.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));
        }
    }

    return m;
}
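For contrast with the first overload, a hypothetical call site on the exchange path (the variable names are assumptions): a non-null exchId attaches per-group update counters, and newCntrMap selects between the full counters map and its legacy representation.

GridDhtPartitionsFullMessage m = cctx.exchange().createPartitionsFullMessage(
    true,              // compress: requires prepareMarshall/finishUnmarshall handling
    true,              // newCntrMap: send CachePartitionFullCountersMap as-is
    exchId,            // exchange ID of the exchange being finished
    lastVer,           // last version
    partHistSuppliers, // partition history suppliers map
    partsToReload);    // partitions-to-reload map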
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
The class GridCachePartitionExchangeManager, method start0.
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    super.start0();

    exchWorker = new ExchangeWorker();

    latchMgr = new ExchangeLatchManager(cctx.kernalContext());

    cctx.gridEvents().addDiscoveryEventListener(discoLsnr, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED, EVT_DISCOVERY_CUSTOM_EVT);

    cctx.io().addCacheHandler(0, GridDhtPartitionsSingleMessage.class,
        new MessageHandler<GridDhtPartitionsSingleMessage>() {
            @Override public void onMessage(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
                GridDhtPartitionExchangeId exchangeId = msg.exchangeId();

                if (exchangeId != null) {
                    GridDhtPartitionsExchangeFuture fut = exchangeFuture(exchangeId);

                    boolean fastReplied = fut.fastReplyOnSingleMessage(node, msg);

                    if (fastReplied) {
                        if (log.isInfoEnabled())
                            log.info("Fast replied to single message [exchId=" + exchangeId + ", nodeId=" + node.id() + "]");

                        return;
                    }
                }
                else {
                    GridDhtPartitionsExchangeFuture cur = lastTopologyFuture();

                    if (!cur.isDone() && cur.changedAffinity() && !msg.restoreState()) {
                        // Delay processing until the in-progress exchange completes successfully.
                        cur.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                                if (fut.error() == null)
                                    processSinglePartitionUpdate(node, msg);
                            }
                        });

                        return;
                    }
                }

                processSinglePartitionUpdate(node, msg);
            }
        });

    cctx.io().addCacheHandler(0, GridDhtPartitionsFullMessage.class,
        new MessageHandler<GridDhtPartitionsFullMessage>() {
            @Override public void onMessage(ClusterNode node, GridDhtPartitionsFullMessage msg) {
                if (msg.exchangeId() == null) {
                    GridDhtPartitionsExchangeFuture currentExchange = lastTopologyFuture();

                    if (currentExchange != null && currentExchange.addOrMergeDelayedFullMessage(node, msg)) {
                        if (log.isInfoEnabled()) {
                            log.info("Delay process full message without exchange id (there is exchange in progress) " +
                                "[nodeId=" + node.id() + "]");
                        }

                        return;
                    }
                }

                processFullPartitionUpdate(node, msg);
            }
        });

    cctx.io().addCacheHandler(0, GridDhtPartitionsSingleRequest.class,
        new MessageHandler<GridDhtPartitionsSingleRequest>() {
            @Override public void onMessage(ClusterNode node, GridDhtPartitionsSingleRequest msg) {
                processSinglePartitionRequest(node, msg);
            }
        });

    if (!cctx.kernalContext().clientNode()) {
        for (int cnt = 0; cnt < cctx.gridConfig().getRebalanceThreadPoolSize(); cnt++) {
            final int idx = cnt;

            cctx.io().addOrderedCacheGroupHandler(cctx, rebalanceTopic(cnt), new CI2<UUID, GridCacheGroupIdMessage>() {
                @Override public void apply(final UUID id, final GridCacheGroupIdMessage m) {
                    if (!enterBusy())
                        return;

                    try {
                        CacheGroupContext grp = cctx.cache().cacheGroup(m.groupId());

                        if (grp != null) {
                            if (m instanceof GridDhtPartitionSupplyMessage) {
                                grp.preloader().handleSupplyMessage(id, (GridDhtPartitionSupplyMessage)m);

                                return;
                            }
                            else if (m instanceof GridDhtPartitionDemandMessage) {
                                grp.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage)m);

                                return;
                            }
                            else if (m instanceof GridDhtPartitionDemandLegacyMessage) {
                                grp.preloader().handleDemandMessage(idx, id,
                                    new GridDhtPartitionDemandMessage((GridDhtPartitionDemandLegacyMessage)m));

                                return;
                            }
                            else
                                U.error(log, "Unsupported message type: " + m.getClass().getName());
                        }

                        U.warn(log, "Cache group with id=" + m.groupId() + " is stopped or absent");
                    }
                    finally {
                        leaveBusy();
                    }
                }
            });
        }
    }

    MetricRegistry mreg = cctx.kernalContext().metric().registry(PME_METRICS);

    mreg.register(PME_DURATION, () -> currentPMEDuration(false), "Current PME duration in milliseconds.");

    mreg.register(PME_OPS_BLOCKED_DURATION, () -> currentPMEDuration(true),
        "Current PME cache operations blocked duration in milliseconds.");

    durationHistogram = mreg.findMetric(PME_DURATION_HISTOGRAM);

    blockingDurationHistogram = mreg.findMetric(PME_OPS_BLOCKED_DURATION_HISTOGRAM);

    MetricRegistry clusterReg = cctx.kernalContext().metric().registry(CLUSTER_METRICS);

    rebalanced = clusterReg.booleanMetric(REBALANCED,
        "True if the cluster has fully achieved rebalanced state. Note that an inactive cluster always has " +
            "this metric in False regardless of the real partitions state.");

    startLatch.countDown();
}
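All three handlers above follow the same registration idiom. A minimal sketch with a hypothetical message type (MyMessage and processMyMessage do not exist in Ignite and are placeholders only):

cctx.io().addCacheHandler(0, MyMessage.class, new MessageHandler<MyMessage>() {
    @Override public void onMessage(ClusterNode node, MyMessage msg) {
        // Delegate to a processing method, mirroring processFullPartitionUpdate(node, msg).
        processMyMessage(node, msg);
    }
});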