use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
the class CacheExchangeMergeTest method blockExchangeFinish.
/**
* @param crd Exchange coordinator.
* @param topVer Exchange topology version.
*/
private void blockExchangeFinish(Ignite crd, long topVer) {
final AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(topVer);
TestRecordingCommunicationSpi.spi(crd).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
if (msg instanceof GridDhtPartitionsFullMessage) {
GridDhtPartitionsFullMessage msg0 = (GridDhtPartitionsFullMessage) msg;
return msg0.exchangeId() != null && msg0.exchangeId().topologyVersion().equals(topVer0);
}
return false;
}
});
}
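A minimal usage sketch for such a helper (the grid count, the assumed topology version 2 and the follow-up calls are illustrative assumptions, not part of the original test; it is meant to run inside a test method that declares throws Exception):
Ignite crd = startGrid(0);
// Hold back the full partition map for the exchange triggered by the next join (assumed topVer = 2).
blockExchangeFinish(crd, 2);
// The joining node will hang in PME until the blocked GridDhtPartitionsFullMessage is released.
IgniteInternalFuture<?> joinFut = GridTestUtils.runAsync(() -> startGrid(1));
// Wait until the coordinator has actually blocked the message, then assert against the unfinished exchange.
TestRecordingCommunicationSpi.spi(crd).waitForBlocked();
// Release the blocked messages and let the exchange complete.
TestRecordingCommunicationSpi.spi(crd).stopBlock();
joinFut.get();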
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
the class TxCrossCacheMapOnInvalidTopologyTest method doTestCrossCacheTxMapOnInvalidTopology.
/**
* Test scenario: a cross-cache tx is started when a node has left in the middle of rebalance; the first cache is fully
* rebalanced and the second is only partially rebalanced.
*
* The first cache map request will trigger a client-compatible remap for pessimistic txs; the second cache map request
* should use the new topology version.
*
* For optimistic txs a remap is enforced if there is more than one mapping in the transaction or all enlisted caches
* have compatible assignments.
*
* Success: the tx finishes on the ideal topology version across all mapped nodes.
*
* @param concurrency Concurrency.
* @param isolation Isolation.
*/
private void doTestCrossCacheTxMapOnInvalidTopology(TransactionConcurrency concurrency, TransactionIsolation isolation) throws Exception {
try {
IgniteEx crd = startGrid(0);
IgniteEx g1 = startGrid(1);
awaitPartitionMapExchange();
IgniteEx client = startClientGrid("client");
assertNotNull(client.cache(CACHE1));
assertNotNull(client.cache(CACHE2));
try (IgniteDataStreamer<Object, Object> streamer = crd.dataStreamer(CACHE1)) {
// Put 500 keys per partition.
for (int k = 0; k < PARTS_CNT * 500; k++) streamer.addData(k, new byte[10]);
}
try (IgniteDataStreamer<Object, Object> streamer = crd.dataStreamer(CACHE2)) {
// Put 500 keys per partition.
for (int k = 0; k < PARTS_CNT * 500; k++) streamer.addData(k, new byte[10]);
}
TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(crd);
final AffinityTopologyVersion joinVer = new AffinityTopologyVersion(4, 0);
AffinityTopologyVersion leftVer = new AffinityTopologyVersion(5, 0);
AffinityTopologyVersion idealVer = new AffinityTopologyVersion(5, 1);
AtomicReference<Set<Integer>> full = new AtomicReference<>();
GridConcurrentSkipListSet<Integer> leftVerParts = new GridConcurrentSkipListSet<>();
crdSpi.blockMessages((node, m) -> {
if (m instanceof GridDhtPartitionSupplyMessage) {
GridDhtPartitionSupplyMessage msg = (GridDhtPartitionSupplyMessage) m;
// Allow full rebalance for cache 1 and system cache.
if (msg.groupId() != CU.cacheId(CACHE2))
return false;
// Allow only first batch for cache 2.
if (msg.topologyVersion().equals(joinVer)) {
if (full.get() == null) {
Map<Integer, Long> last = U.field(msg, "last");
full.set(last.keySet());
return false;
}
return true;
}
if (msg.topologyVersion().equals(leftVer)) {
Map<Integer, Long> last = U.field(msg, "last");
leftVerParts.addAll(last.keySet());
return true;
}
} else if (m instanceof GridDhtPartitionsFullMessage) {
GridDhtPartitionsFullMessage msg = (GridDhtPartitionsFullMessage) m;
// Delay full message for ideal topology switch.
GridDhtPartitionExchangeId exchId = msg.exchangeId();
if (exchId != null && exchId.topologyVersion().equals(idealVer))
return true;
}
return false;
});
TestRecordingCommunicationSpi g1Spi = TestRecordingCommunicationSpi.spi(g1);
g1Spi.blockMessages((node, msg) -> {
if (msg instanceof GridDhtPartitionSupplyMessage) {
GridDhtPartitionSupplyMessage m = (GridDhtPartitionSupplyMessage) msg;
return m.groupId() == CU.cacheId(CACHE2);
}
return false;
});
startGrid(2);
crdSpi.waitForBlocked();
g1Spi.waitForBlocked();
// Wait for partial owning.
assertTrue("Timed out while waiting for rebalance", GridTestUtils.waitForCondition(() -> {
// Await full rebalance of cache 1.
GridDhtPartitionTopology top0 = grid(2).cachex(CACHE1).context().topology();
for (int p = 0; p < PARTS_CNT; p++) {
if (top0.localPartition(p).state() != OWNING)
return false;
}
// Await partial rebalance of cache 2.
GridDhtPartitionTopology top1 = grid(2).cachex(CACHE2).context().topology();
for (Integer part : full.get()) {
if (top1.localPartition(part).state() != OWNING)
return false;
}
return true;
}, 10_000));
// At this point cache 1 is fully rebalanced and cache 2 is partially rebalanced.
// Stop supplier in the middle of rebalance.
g1.close();
// Wait for topologies and calculate required partitions.
grid(0).cachex(CACHE1).context().affinity().affinityReadyFuture(leftVer).get();
grid(2).cachex(CACHE1).context().affinity().affinityReadyFuture(leftVer).get();
grid(0).cachex(CACHE2).context().affinity().affinityReadyFuture(leftVer).get();
grid(2).cachex(CACHE2).context().affinity().affinityReadyFuture(leftVer).get();
AffinityAssignment assignment0 = grid(0).cachex(CACHE1).context().affinity().assignment(leftVer);
AffinityAssignment assignment = grid(0).cachex(CACHE2).context().affinity().assignment(leftVer);
// Search for a partition with incompatible assignment.
// Partition of cache1 which is mapped to the same primary on both the late and the ideal topology.
int stablePart = -1;
// Partition of cache2 which is mapped to different primaries on the late and the ideal topology.
int movingPart = -1;
for (int p = 0; p < assignment0.assignment().size(); p++) {
List<ClusterNode> curr = assignment0.assignment().get(p);
List<ClusterNode> ideal = assignment0.idealAssignment().get(p);
if (curr.equals(ideal) && curr.get(0).order() == 1) {
stablePart = p;
break;
}
}
assertFalse(stablePart == -1);
for (int p = 0; p < assignment.assignment().size(); p++) {
List<ClusterNode> curr = assignment.assignment().get(p);
List<ClusterNode> ideal = assignment.idealAssignment().get(p);
if (!curr.equals(ideal) && curr.get(0).order() == 1) {
movingPart = p;
break;
}
}
assertFalse(movingPart == -1);
TestRecordingCommunicationSpi.spi(client).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
if (concurrency == PESSIMISTIC)
return msg instanceof GridNearLockRequest;
else
return msg instanceof GridNearTxPrepareRequest;
}
});
final int finalStablePart = stablePart;
final int finalMovingPart = movingPart;
IgniteInternalFuture<?> txFut = multithreadedAsync(() -> {
try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
// Will map on crd(order=1).
client.cache(CACHE1).put(finalStablePart, 0);
// The next request will remap to the ideal topology, but it is not ready on any node except crd.
client.cache(CACHE2).put(finalMovingPart, 0);
tx.commit();
}
}, 1, "tx-thread");
// Wait until all missing supply messages are blocked.
assertTrue(GridTestUtils.waitForCondition(() -> leftVerParts.size() == PARTS_CNT - full.get().size(), 5_000));
// Delay first lock request on late topology.
TestRecordingCommunicationSpi.spi(client).waitForBlocked();
// At this point only supply messages should be blocked.
// Unblock to continue rebalance and trigger ideal topology switch.
crdSpi.stopBlock(true, null, false, true);
// Wait until ideal topology is ready on crd.
crd.context().cache().context().exchange().affinityReadyFuture(idealVer).get(10_000);
// Other node must wait for full message.
assertFalse(GridTestUtils.waitForCondition(() -> grid(2).context().cache().context().exchange().affinityReadyFuture(idealVer).isDone(), 1_000));
// Map on unstable topology (PME is in progress on other node).
TestRecordingCommunicationSpi.spi(client).stopBlock();
// Capture local transaction.
IgniteInternalTx tx0 = client.context().cache().context().tm().activeTransactions().iterator().next();
// Expected behavior: tx must hang (both pessimistic and optimistic) because topology is not ready.
try {
txFut.get(3_000);
fail("TX must not complete");
} catch (IgniteFutureTimeoutCheckedException e) {
// Expected.
}
crdSpi.stopBlock();
txFut.get();
// Check transaction map version. Should be mapped on ideal topology.
assertEquals(tx0.topologyVersionSnapshot(), idealVer);
awaitPartitionMapExchange();
checkFutures();
} finally {
stopAllGrids();
}
}
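In the test class this private scenario is driven by per-mode @Test methods; a hedged sketch of such drivers (the method names and the chosen isolation levels are illustrative, not necessarily the ones in the original class):
/** */
@Test
public void testCrossCacheTxMapOnInvalidTopologyPessimistic() throws Exception {
    doTestCrossCacheTxMapOnInvalidTopology(PESSIMISTIC, TransactionIsolation.REPEATABLE_READ);
}

/** */
@Test
public void testCrossCacheTxMapOnInvalidTopologyOptimistic() throws Exception {
    doTestCrossCacheTxMapOnInvalidTopology(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE);
}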
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
the class ClusterMetricsSelfTest method checkPmeMetricsOnNodeJoin.
/**
* @param client Client flag.
* @throws Exception If failed.
*/
private void checkPmeMetricsOnNodeJoin(boolean client) throws Exception {
IgniteEx ignite = startGrid(0);
MetricRegistry reg = ignite.context().metric().registry(PME_METRICS);
LongMetric currentPMEDuration = reg.findMetric(PME_DURATION);
LongMetric currentBlockingPMEDuration = reg.findMetric(PME_OPS_BLOCKED_DURATION);
HistogramMetricImpl durationHistogram = reg.findMetric(PME_DURATION_HISTOGRAM);
HistogramMetricImpl blockingDurationHistogram = reg.findMetric(PME_OPS_BLOCKED_DURATION_HISTOGRAM);
IgniteCache<Object, Object> cache = ignite.getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME).setAtomicityMode(TRANSACTIONAL));
cache.put(1, 1);
awaitPartitionMapExchange();
int timeout = 5000;
assertTrue(GridTestUtils.waitForCondition(() -> currentPMEDuration.value() == 0, timeout));
assertEquals(0, currentBlockingPMEDuration.value());
// There were two blocking exchanges: server node start and cache start.
assertEquals(2, Arrays.stream(durationHistogram.value()).sum());
assertEquals(2, Arrays.stream(blockingDurationHistogram.value()).sum());
Lock lock = cache.lock(1);
lock.lock();
TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(ignite);
spi.blockMessages((node, message) -> message instanceof GridDhtPartitionsFullMessage);
GridTestUtils.runAsync(() -> client ? startClientGrid("client") : startGrid(1));
assertTrue(waitForCondition(() -> ignite.context().cache().context().exchange().lastTopologyFuture().initialVersion().topologyVersion() == 2, timeout));
if (client)
assertEquals(0, currentBlockingPMEDuration.value());
else
assertTrue(currentBlockingPMEDuration.value() > 0);
lock.unlock();
spi.waitForBlocked();
spi.stopBlock();
awaitPartitionMapExchange();
assertTrue(GridTestUtils.waitForCondition(() -> currentPMEDuration.value() == 0, timeout));
assertEquals(0, currentBlockingPMEDuration.value());
if (client) {
// There was one more (non-blocking) exchange: client node start.
assertEquals(3, Arrays.stream(durationHistogram.value()).sum());
assertEquals(2, Arrays.stream(blockingDurationHistogram.value()).sum());
} else {
// There were two more blocking exchanges: server node start and rebalance completion.
assertEquals(4, Arrays.stream(durationHistogram.value()).sum());
assertEquals(4, Arrays.stream(blockingDurationHistogram.value()).sum());
}
}
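This check is presumably invoked from two public test methods, one per join mode; an illustrative sketch (the method names are assumptions, not taken from the original class):
/** */
@Test
public void testPmeMetricsOnServerNodeJoin() throws Exception {
    checkPmeMetricsOnNodeJoin(false);
}

/** */
@Test
public void testPmeMetricsOnClientNodeJoin() throws Exception {
    checkPmeMetricsOnNodeJoin(true);
}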
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
the class IgnitePdsDestroyCacheTest method cacheDestroyWithConcImplicitTx.
/**
* Tests correctness of concurrent cache destroy and implicit transactions.
*/
@Test
public void cacheDestroyWithConcImplicitTx() throws Exception {
final IgniteEx crd = (IgniteEx) startGridsMultiThreaded(3);
crd.cluster().state(ClusterState.ACTIVE);
crd.createCache(new CacheConfiguration(DEFAULT_CACHE_NAME).setBackups(1).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL).setGroupName("test"));
// A cache group with multiple caches is important here: in this case cache removals are not so rapid.
crd.createCache(new CacheConfiguration(DEFAULT_CACHE_NAME + "_1").setBackups(1).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL).setGroupName("test"));
Set<Integer> pkeys = new TreeSet<>();
try (final IgniteDataStreamer<Object, Object> streamer = crd.dataStreamer(DEFAULT_CACHE_NAME)) {
for (int i = 0; i < 100; i++) {
streamer.addData(i, i);
if (crd.affinity(DEFAULT_CACHE_NAME).isPrimary(crd.localNode(), i))
pkeys.add(i);
}
}
TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(crd);
spi.blockMessages(GridDhtTxPrepareRequest.class, getTestIgniteInstanceName(1));
List<IgniteFuture<Boolean>> asyncRmFut = new ArrayList<>(100);
for (Integer pkey : pkeys) asyncRmFut.add(crd.cache(DEFAULT_CACHE_NAME).removeAsync(pkey));
spi.blockMessages(GridDhtPartitionsFullMessage.class, getTestIgniteInstanceName(1));
IgniteInternalFuture destr = GridTestUtils.runAsync(() -> grid(1).destroyCache(DEFAULT_CACHE_NAME));
spi.waitForBlocked();
spi.stopBlock(true, (msg) -> msg.ioMessage().message() instanceof GridDhtPartitionsFullMessage);
spi.stopBlock();
destr.get();
// A somewhat anti-pattern approach here, needed because of async remapping; see
// GridNearOptimisticTxPrepareFutureAdapter.prepareOnTopology.
// Even with a redefined failure handler we still need the same approach: wait some time and check that it is not triggered.
assertFalse(GridTestUtils.waitForCondition(() -> G.allGrids().size() < 3, 5_000));
try {
asyncRmFut.forEach(f -> f.get(getTestTimeout() / 2));
} catch (CacheException ignore) {
// No op.
}
}
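The TestRecordingCommunicationSpi calls above follow a generic block / wait / selectively-unblock pattern; a condensed sketch of that pattern in isolation (the crd variable and node index are assumptions, not the original test flow):
TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(crd);
// Block prepare requests and exchange full messages addressed to the second node.
spi.blockMessages(GridDhtTxPrepareRequest.class, getTestIgniteInstanceName(1));
spi.blockMessages(GridDhtPartitionsFullMessage.class, getTestIgniteInstanceName(1));
// ... start the operations that are expected to get stuck on those messages ...
spi.waitForBlocked();
// First release only the blocked full messages, then everything else.
spi.stopBlock(true, (msg) -> msg.ioMessage().message() instanceof GridDhtPartitionsFullMessage);
spi.stopBlock();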
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.
the class GridExchangeFreeSwitchTest method startPmeMessagesCounting.
/**
* @param nodes Nodes.
* @param singleCnt Counter for GridDhtPartitionsSingleMessage.
* @param fullCnt Counter for GridDhtPartitionsFullMessage.
*/
private void startPmeMessagesCounting(int nodes, AtomicLong singleCnt, AtomicLong fullCnt) {
for (int i = 0; i < nodes; i++) {
TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) ignite(i).configuration().getCommunicationSpi();
spi.closure(new IgniteBiInClosure<ClusterNode, Message>() {
@Override
public void apply(ClusterNode node, Message msg) {
if (msg.getClass().equals(GridDhtPartitionsSingleMessage.class) && ((GridDhtPartitionsAbstractMessage) msg).exchangeId() != null)
singleCnt.incrementAndGet();
if (msg.getClass().equals(GridDhtPartitionsFullMessage.class) && ((GridDhtPartitionsAbstractMessage) msg).exchangeId() != null)
fullCnt.incrementAndGet();
}
});
}
}
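A hedged usage sketch for this counting helper (the node counts, the stopped node and the zero-message assertions are assumptions): install the closures, perform a switch that is expected to be exchange-free, then verify that no partition map messages were sent:
AtomicLong singleCnt = new AtomicLong();
AtomicLong fullCnt = new AtomicLong();
// Count PME messages on the surviving nodes.
startPmeMessagesCounting(2, singleCnt, fullCnt);
// Stop a node; with an exchange-free switch no single/full partition messages are expected.
stopGrid(2);
awaitPartitionMapExchange();
assertEquals(0, singleCnt.get());
assertEquals(0, fullCnt.get());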