Example 6 with GridDhtPartitionsFullMessage

use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.

the class CacheExchangeMergeTest method blockExchangeFinish.

/**
 * @param crd Exchange coordinator.
 * @param topVer Exchange topology version.
 */
private void blockExchangeFinish(Ignite crd, long topVer) {
    final AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(topVer);
    TestRecordingCommunicationSpi.spi(crd).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {

        @Override
        public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtPartitionsFullMessage) {
                GridDhtPartitionsFullMessage msg0 = (GridDhtPartitionsFullMessage) msg;
                return msg0.exchangeId() != null && msg0.exchangeId().topologyVersion().equals(topVer0);
            }
            return false;
        }
    });
}
Also used :
ClusterNode (org.apache.ignite.cluster.ClusterNode)
GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage)
GridDhtPartitionsAbstractMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsAbstractMessage)
GridIoMessage (org.apache.ignite.internal.managers.communication.GridIoMessage)
GridDhtPartitionDemandMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemandMessage)
Message (org.apache.ignite.plugin.extensions.communication.Message)
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion)
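
A minimal sketch of how such a helper might be driven from a test. The caller below is hypothetical; only blockExchangeFinish itself and the TestRecordingCommunicationSpi recording calls come from the snippet above:

private void exchangeFinishBlockingScenario() throws Exception {
    Ignite crd = startGrid(0);

    // Block the coordinator's full message for the exchange on topology version 2.
    blockExchangeFinish(crd, 2);

    // A joining node now hangs in PME until the blocked
    // GridDhtPartitionsFullMessage is released.
    IgniteInternalFuture<?> joinFut = GridTestUtils.runAsync(() -> startGrid(1));

    // ... assertions against the in-progress exchange go here ...

    // Release the blocked message so the exchange can finish.
    TestRecordingCommunicationSpi.spi(crd).stopBlock();

    joinFut.get();
}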

Example 7 with GridDhtPartitionsFullMessage

use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.

the class TxCrossCacheMapOnInvalidTopologyTest method doTestCrossCacheTxMapOnInvalidTopology.

/**
 * Test scenario: a cross-cache tx is started while a node leaves in the middle of rebalance; the first cache is
 * fully rebalanced and the second is only partially rebalanced.
 *
 * The first cache map request will trigger a client-compatible remap for pessimistic txs; the second cache map
 * request should use the new topology version.
 *
 * For optimistic txs a remap is enforced if there is more than one mapping in the transaction or all enlisted
 * caches have compatible assignments.
 *
 * Success: tx is finished on ideal topology version over all mapped nodes.
 *
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 */
private void doTestCrossCacheTxMapOnInvalidTopology(TransactionConcurrency concurrency, TransactionIsolation isolation) throws Exception {
    try {
        IgniteEx crd = startGrid(0);
        IgniteEx g1 = startGrid(1);
        awaitPartitionMapExchange();
        IgniteEx client = startClientGrid("client");
        assertNotNull(client.cache(CACHE1));
        assertNotNull(client.cache(CACHE2));
        try (IgniteDataStreamer<Object, Object> streamer = crd.dataStreamer(CACHE1)) {
            // Put 500 keys per partition.
            for (int k = 0; k < PARTS_CNT * 500; k++) streamer.addData(k, new byte[10]);
        }
        try (IgniteDataStreamer<Object, Object> streamer = crd.dataStreamer(CACHE2)) {
            // Put 500 keys per partition.
            for (int k = 0; k < PARTS_CNT * 500; k++) streamer.addData(k, new byte[10]);
        }
        TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(crd);
        final AffinityTopologyVersion joinVer = new AffinityTopologyVersion(4, 0);
        AffinityTopologyVersion leftVer = new AffinityTopologyVersion(5, 0);
        AffinityTopologyVersion idealVer = new AffinityTopologyVersion(5, 1);
        AtomicReference<Set<Integer>> full = new AtomicReference<>();
        GridConcurrentSkipListSet<Integer> leftVerParts = new GridConcurrentSkipListSet<>();
        crdSpi.blockMessages((node, m) -> {
            if (m instanceof GridDhtPartitionSupplyMessage) {
                GridDhtPartitionSupplyMessage msg = (GridDhtPartitionSupplyMessage) m;
                // Allow full rebalance for cache 1 and system cache.
                if (msg.groupId() != CU.cacheId(CACHE2))
                    return false;
                // Allow only first batch for cache 2.
                if (msg.topologyVersion().equals(joinVer)) {
                    if (full.get() == null) {
                        Map<Integer, Long> last = U.field(msg, "last");
                        full.set(last.keySet());
                        return false;
                    }
                    return true;
                }
                if (msg.topologyVersion().equals(leftVer)) {
                    Map<Integer, Long> last = U.field(msg, "last");
                    leftVerParts.addAll(last.keySet());
                    return true;
                }
            } else if (m instanceof GridDhtPartitionsFullMessage) {
                GridDhtPartitionsFullMessage msg = (GridDhtPartitionsFullMessage) m;
                // Delay full message for ideal topology switch.
                GridDhtPartitionExchangeId exchId = msg.exchangeId();
                if (exchId != null && exchId.topologyVersion().equals(idealVer))
                    return true;
            }
            return false;
        });
        TestRecordingCommunicationSpi g1Spi = TestRecordingCommunicationSpi.spi(g1);
        g1Spi.blockMessages((node, msg) -> {
            if (msg instanceof GridDhtPartitionSupplyMessage) {
                GridDhtPartitionSupplyMessage m = (GridDhtPartitionSupplyMessage) msg;
                return m.groupId() == CU.cacheId(CACHE2);
            }
            return false;
        });
        startGrid(2);
        crdSpi.waitForBlocked();
        g1Spi.waitForBlocked();
        // Wait for partial ownership.
        assertTrue("Timed out while waiting for rebalance", GridTestUtils.waitForCondition(() -> {
            // Await full rebalance for cache 1.
            GridDhtPartitionTopology top0 = grid(2).cachex(CACHE1).context().topology();
            for (int p = 0; p < PARTS_CNT; p++) {
                if (top0.localPartition(p).state() != OWNING)
                    return false;
            }
            // Await partial rebalance for cache 2.
            GridDhtPartitionTopology top1 = grid(2).cachex(CACHE2).context().topology();
            for (Integer part : full.get()) {
                if (top1.localPartition(part).state() != OWNING)
                    return false;
            }
            return true;
        }, 10_000));
        // At this point cache 1 is fully rebalanced and cache 2 is partially rebalanced.
        // Stop supplier in the middle of rebalance.
        g1.close();
        // Wait for topologies and calculate required partitions.
        grid(0).cachex(CACHE1).context().affinity().affinityReadyFuture(leftVer).get();
        grid(2).cachex(CACHE1).context().affinity().affinityReadyFuture(leftVer).get();
        grid(0).cachex(CACHE2).context().affinity().affinityReadyFuture(leftVer).get();
        grid(2).cachex(CACHE2).context().affinity().affinityReadyFuture(leftVer).get();
        AffinityAssignment assignment0 = grid(0).cachex(CACHE1).context().affinity().assignment(leftVer);
        AffinityAssignment assignment = grid(0).cachex(CACHE2).context().affinity().assignment(leftVer);
        // Search for a partition with incompatible assignment.
        // Partition for cache1 which is mapped for both late and ideal topologies to the same primary.
        int stablePart = -1;
        // Partition for cache2 which is mapped for both late and ideal topologies on different primaries.
        int movingPart = -1;
        for (int p = 0; p < assignment0.assignment().size(); p++) {
            List<ClusterNode> curr = assignment0.assignment().get(p);
            List<ClusterNode> ideal = assignment0.idealAssignment().get(p);
            if (curr.equals(ideal) && curr.get(0).order() == 1) {
                stablePart = p;
                break;
            }
        }
        assertFalse(stablePart == -1);
        for (int p = 0; p < assignment.assignment().size(); p++) {
            List<ClusterNode> curr = assignment.assignment().get(p);
            List<ClusterNode> ideal = assignment.idealAssignment().get(p);
            if (!curr.equals(ideal) && curr.get(0).order() == 1) {
                movingPart = p;
                break;
            }
        }
        assertFalse(movingPart == -1);
        TestRecordingCommunicationSpi.spi(client).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {

            @Override
            public boolean apply(ClusterNode node, Message msg) {
                if (concurrency == PESSIMISTIC)
                    return msg instanceof GridNearLockRequest;
                else
                    return msg instanceof GridNearTxPrepareRequest;
            }
        });
        final int finalStablePart = stablePart;
        final int finalMovingPart = movingPart;
        IgniteInternalFuture<?> txFut = multithreadedAsync(() -> {
            try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
                // Will map on crd (order=1).
                client.cache(CACHE1).put(finalStablePart, 0);
                // The next request will remap to the ideal topology, which is not ready on any node except crd.
                client.cache(CACHE2).put(finalMovingPart, 0);
                tx.commit();
            }
        }, 1, "tx-thread");
        // Wait until all missing supply messages are blocked.
        assertTrue(GridTestUtils.waitForCondition(() -> leftVerParts.size() == PARTS_CNT - full.get().size(), 5_000));
        // Delay first lock request on late topology.
        TestRecordingCommunicationSpi.spi(client).waitForBlocked();
        // At this point only supply messages should be blocked.
        // Unblock to continue rebalance and trigger ideal topology switch.
        crdSpi.stopBlock(true, null, false, true);
        // Wait until ideal topology is ready on crd.
        crd.context().cache().context().exchange().affinityReadyFuture(idealVer).get(10_000);
        // Other node must wait for full message.
        assertFalse(GridTestUtils.waitForCondition(() -> grid(2).context().cache().context().exchange().affinityReadyFuture(idealVer).isDone(), 1_000));
        // Map on unstable topology (PME is in progress on other node).
        TestRecordingCommunicationSpi.spi(client).stopBlock();
        // Capture local transaction.
        IgniteInternalTx tx0 = client.context().cache().context().tm().activeTransactions().iterator().next();
        // Expected behavior: tx must hang (both pessimistic and optimistic) because topology is not ready.
        try {
            txFut.get(3_000);
            fail("TX must not complete");
        } catch (IgniteFutureTimeoutCheckedException e) {
            // Expected.
        }
        crdSpi.stopBlock();
        txFut.get();
        // Check transaction map version. Should be mapped on ideal topology.
        assertEquals(idealVer, tx0.topologyVersionSnapshot());
        awaitPartitionMapExchange();
        checkFutures();
    } finally {
        stopAllGrids();
    }
}
Also used :
AffinityAssignment (org.apache.ignite.internal.processors.affinity.AffinityAssignment)
GridConcurrentSkipListSet (org.apache.ignite.internal.util.GridConcurrentSkipListSet)
Set (java.util.Set)
GridDhtPartitionSupplyMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessage)
GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage)
Message (org.apache.ignite.plugin.extensions.communication.Message)
GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology)
GridNearLockRequest (org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest)
GridDhtPartitionExchangeId (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionExchangeId)
IgniteFutureTimeoutCheckedException (org.apache.ignite.internal.IgniteFutureTimeoutCheckedException)
ClusterNode (org.apache.ignite.cluster.ClusterNode)
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion)
AtomicReference (java.util.concurrent.atomic.AtomicReference)
TestRecordingCommunicationSpi (org.apache.ignite.internal.TestRecordingCommunicationSpi)
Transaction (org.apache.ignite.transactions.Transaction)
IgniteEx (org.apache.ignite.internal.IgniteEx)
GridNearTxPrepareRequest (org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest)
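
The current-versus-ideal comparison in the middle of this example is a reusable idiom for spotting partitions that still have to move. A hedged sketch of it as a standalone helper, where the method name is invented but the AffinityAssignment calls are exactly the ones the test uses:

/**
 * Illustrative helper (the name is an assumption): returns the first partition whose
 * current owners differ from the ideal owners, or -1 if the assignment is already ideal.
 */
private static int firstMovingPartition(AffinityAssignment assignment) {
    for (int p = 0; p < assignment.assignment().size(); p++) {
        List<ClusterNode> curr = assignment.assignment().get(p);
        List<ClusterNode> ideal = assignment.idealAssignment().get(p);

        // A mismatch means the partition has not yet reached its ideal placement.
        if (!curr.equals(ideal))
            return p;
    }

    return -1;
}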

Example 8 with GridDhtPartitionsFullMessage

use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.

the class ClusterMetricsSelfTest method checkPmeMetricsOnNodeJoin.

/**
 * @param client Client flag.
 * @throws Exception If failed.
 */
private void checkPmeMetricsOnNodeJoin(boolean client) throws Exception {
    IgniteEx ignite = startGrid(0);
    MetricRegistry reg = ignite.context().metric().registry(PME_METRICS);
    LongMetric currentPMEDuration = reg.findMetric(PME_DURATION);
    LongMetric currentBlockingPMEDuration = reg.findMetric(PME_OPS_BLOCKED_DURATION);
    HistogramMetricImpl durationHistogram = reg.findMetric(PME_DURATION_HISTOGRAM);
    HistogramMetricImpl blockingDurationHistogram = reg.findMetric(PME_OPS_BLOCKED_DURATION_HISTOGRAM);
    IgniteCache<Object, Object> cache = ignite.getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME).setAtomicityMode(TRANSACTIONAL));
    cache.put(1, 1);
    awaitPartitionMapExchange();
    int timeout = 5000;
    assertTrue(GridTestUtils.waitForCondition(() -> currentPMEDuration.value() == 0, timeout));
    assertEquals(0, currentBlockingPMEDuration.value());
    // There were two blocking exchanges: server node start and cache start.
    assertEquals(2, Arrays.stream(durationHistogram.value()).sum());
    assertEquals(2, Arrays.stream(blockingDurationHistogram.value()).sum());
    Lock lock = cache.lock(1);
    lock.lock();
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(ignite);
    spi.blockMessages((node, message) -> message instanceof GridDhtPartitionsFullMessage);
    GridTestUtils.runAsync(() -> client ? startClientGrid("client") : startGrid(1));
    assertTrue(waitForCondition(() -> ignite.context().cache().context().exchange().lastTopologyFuture().initialVersion().topologyVersion() == 2, timeout));
    if (client)
        assertEquals(0, currentBlockingPMEDuration.value());
    else
        assertTrue(currentBlockingPMEDuration.value() > 0);
    lock.unlock();
    spi.waitForBlocked();
    spi.stopBlock();
    awaitPartitionMapExchange();
    assertTrue(GridTestUtils.waitForCondition(() -> currentPMEDuration.value() == 0, timeout));
    assertEquals(0, currentBlockingPMEDuration.value());
    if (client) {
        // There was one additional non-blocking exchange: client node start.
        assertEquals(3, Arrays.stream(durationHistogram.value()).sum());
        assertEquals(2, Arrays.stream(blockingDurationHistogram.value()).sum());
    } else {
        // There were two additional blocking exchanges: server node start and rebalance completion.
        assertEquals(4, Arrays.stream(durationHistogram.value()).sum());
        assertEquals(4, Arrays.stream(blockingDurationHistogram.value()).sum());
    }
}
Also used :
TestRecordingCommunicationSpi (org.apache.ignite.internal.TestRecordingCommunicationSpi)
GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage)
IgniteEx (org.apache.ignite.internal.IgniteEx)
MetricRegistry (org.apache.ignite.internal.processors.metric.MetricRegistry)
LongMetric (org.apache.ignite.spi.metric.LongMetric)
HistogramMetricImpl (org.apache.ignite.internal.processors.metric.impl.HistogramMetricImpl)
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration)
Lock (java.util.concurrent.locks.Lock)
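
As a usage note, a minimal sketch of dumping the same PME metrics this test asserts on. It reuses only the registry calls visible above; the printing itself is illustrative:

private static void dumpPmeMetrics(IgniteEx ignite) {
    MetricRegistry reg = ignite.context().metric().registry(PME_METRICS);

    LongMetric duration = reg.findMetric(PME_DURATION);
    LongMetric blockedDuration = reg.findMetric(PME_OPS_BLOCKED_DURATION);
    HistogramMetricImpl histogram = reg.findMetric(PME_DURATION_HISTOGRAM);

    System.out.println("Current PME duration: " + duration.value() + " ms");
    System.out.println("Current blocking PME duration: " + blockedDuration.value() + " ms");

    // Each histogram bucket counts finished exchanges in its duration range,
    // so the sum over buckets is the total number of exchanges observed.
    System.out.println("Exchanges observed: " + Arrays.stream(histogram.value()).sum());
}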

Example 9 with GridDhtPartitionsFullMessage

use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.

the class IgnitePdsDestroyCacheTest method cacheDestroyWithConcImplicitTx.

/**
 * Tests correctness of concurrent cache destroy and implicit txs.
 */
@Test
public void cacheDestroyWithConcImplicitTx() throws Exception {
    final IgniteEx crd = (IgniteEx) startGridsMultiThreaded(3);
    crd.cluster().state(ClusterState.ACTIVE);
    crd.createCache(new CacheConfiguration(DEFAULT_CACHE_NAME).setBackups(1).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL).setGroupName("test"));
    // A cache group with multiple caches is important here: in this case cache removals are not so rapid.
    crd.createCache(new CacheConfiguration(DEFAULT_CACHE_NAME + "_1").setBackups(1).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL).setGroupName("test"));
    Set<Integer> pkeys = new TreeSet<>();
    try (final IgniteDataStreamer<Object, Object> streamer = crd.dataStreamer(DEFAULT_CACHE_NAME)) {
        for (int i = 0; i < 100; i++) {
            streamer.addData(i, i);
            if (crd.affinity(DEFAULT_CACHE_NAME).isPrimary(crd.localNode(), i))
                pkeys.add(i);
        }
    }
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(crd);
    spi.blockMessages(GridDhtTxPrepareRequest.class, getTestIgniteInstanceName(1));
    List<IgniteFuture<Boolean>> asyncRmFut = new ArrayList<>(100);
    for (Integer pkey : pkeys) asyncRmFut.add(crd.cache(DEFAULT_CACHE_NAME).removeAsync(pkey));
    spi.blockMessages(GridDhtPartitionsFullMessage.class, getTestIgniteInstanceName(1));
    IgniteInternalFuture destr = GridTestUtils.runAsync(() -> grid(1).destroyCache(DEFAULT_CACHE_NAME));
    spi.waitForBlocked();
    spi.stopBlock(true, (msg) -> msg.ioMessage().message() instanceof GridDhtPartitionsFullMessage);
    spi.stopBlock();
    destr.get();
    // A somewhat anti-pattern approach here, needed because of async remapping; see
    // GridNearOptimisticTxPrepareFutureAdapter.prepareOnTopology.
    // Even with a redefined failure handler we would need the same approach: wait some time and check that it is not triggered.
    assertFalse(GridTestUtils.waitForCondition(() -> G.allGrids().size() < 3, 5_000));
    try {
        asyncRmFut.forEach(f -> f.get(getTestTimeout() / 2));
    } catch (CacheException ignore) {
    // No op.
    }
}
Also used :
CacheException (javax.cache.CacheException)
ArrayList (java.util.ArrayList)
IgniteFuture (org.apache.ignite.lang.IgniteFuture)
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture)
TestRecordingCommunicationSpi (org.apache.ignite.internal.TestRecordingCommunicationSpi)
GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage)
TreeSet (java.util.TreeSet)
IgniteEx (org.apache.ignite.internal.IgniteEx)
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration)
Test (org.junit.Test)
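
The two-step unblock above is the part worth isolating: the first stopBlock releases only the PME full message so cache destroy can complete while tx prepare requests stay blocked, and the second releases the rest. A sketch of that idiom on its own, using only calls the test itself makes:

TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(crd);

// Block both the tx prepare requests and the PME full message to node 1.
spi.blockMessages(GridDhtTxPrepareRequest.class, getTestIgniteInstanceName(1));
spi.blockMessages(GridDhtPartitionsFullMessage.class, getTestIgniteInstanceName(1));

spi.waitForBlocked();

// Step 1: release only the PME full message so the exchange can finish.
spi.stopBlock(true, msg -> msg.ioMessage().message() instanceof GridDhtPartitionsFullMessage);

// Step 2: release everything else that is still queued.
spi.stopBlock();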

Example 10 with GridDhtPartitionsFullMessage

use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage in project ignite by apache.

the class GridExchangeFreeSwitchTest method startPmeMessagesCounting.

/**
 * @param nodes Nodes.
 * @param singleCnt Counter for GridDhtPartitionsSingleMessage.
 * @param fullCnt Counter for GridDhtPartitionsFullMessage.
 */
private void startPmeMessagesCounting(int nodes, AtomicLong singleCnt, AtomicLong fullCnt) {
    for (int i = 0; i < nodes; i++) {
        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) ignite(i).configuration().getCommunicationSpi();
        spi.closure(new IgniteBiInClosure<ClusterNode, Message>() {

            @Override
            public void apply(ClusterNode node, Message msg) {
                if (msg.getClass().equals(GridDhtPartitionsSingleMessage.class) && ((GridDhtPartitionsAbstractMessage) msg).exchangeId() != null)
                    singleCnt.incrementAndGet();
                if (msg.getClass().equals(GridDhtPartitionsFullMessage.class) && ((GridDhtPartitionsAbstractMessage) msg).exchangeId() != null)
                    fullCnt.incrementAndGet();
            }
        });
    }
}
Also used :
ClusterNode (org.apache.ignite.cluster.ClusterNode)
TestRecordingCommunicationSpi (org.apache.ignite.internal.TestRecordingCommunicationSpi)
GridDhtPartitionsAbstractMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsAbstractMessage)
GridDhtPartitionsSingleMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage)
GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage)
Message (org.apache.ignite.plugin.extensions.communication.Message)
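
A hypothetical caller, sketched to show what these counters are typically asserted against in an exchange-free switch scenario (the grid count and the stopped node index are assumptions, not taken from the snippet):

AtomicLong singleCnt = new AtomicLong();
AtomicLong fullCnt = new AtomicLong();

startPmeMessagesCounting(3, singleCnt, fullCnt);

// A node leaves; with exchange-free switch the remaining nodes are expected
// to complete the switch without any PME messaging.
stopGrid(2);

awaitPartitionMapExchange();

assertEquals(0, singleCnt.get());
assertEquals(0, fullCnt.get());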

Aggregations

GridDhtPartitionsFullMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage): 22 usages
TestRecordingCommunicationSpi (org.apache.ignite.internal.TestRecordingCommunicationSpi): 12 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 12 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11 usages
Message (org.apache.ignite.plugin.extensions.communication.Message): 8 usages
Ignite (org.apache.ignite.Ignite): 7 usages
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 7 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 5 usages
IgniteEx (org.apache.ignite.internal.IgniteEx): 5 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 4 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 4 usages
GridDhtPartitionsAbstractMessage (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsAbstractMessage): 4 usages
Transaction (org.apache.ignite.transactions.Transaction): 4 usages
Test (org.junit.Test): 4 usages
ArrayList (java.util.ArrayList): 3 usages
HashMap (java.util.HashMap): 3 usages
IgniteFutureTimeoutCheckedException (org.apache.ignite.internal.IgniteFutureTimeoutCheckedException): 3 usages
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 3 usages
GridAffinityAssignmentCache (org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache): 3 usages
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 3 usages