Search in sources :

Example 36 with GridConcurrentHashSet

use of org.apache.ignite.internal.util.GridConcurrentHashSet in project ignite by apache.

the class GridExchangeFreeCellularSwitchIsolationTest method testOnlyAffectedNodesWaitForRecovery.

/**
 * Test checks that non-affected nodes (alive cells) finish the switch ASAP,
 * i.e. that they wait only for the recovery related to these nodes (e.g. replicated caches recovery, which affects every node).
 */
@Test
public void testOnlyAffectedNodesWaitForRecovery() throws Exception {
    int nodes = 6;
    String recoveryStatusMsg = "TxRecovery Status and Timings [txs=";
    // Any.
    LogListener lsnrAny = matches(recoveryStatusMsg).build();
    LogListener lsnrBrokenCell = matches(recoveryStatusMsg).times((nodes / 2) - 1).build();
    LogListener lsnrAliveCell = matches(recoveryStatusMsg).times((nodes / 2)).build();
    listeningLog.registerListener(lsnrAny);
    startGridsMultiThreaded(nodes);
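    // Block tx recovery messages so that recovery stays pending until it is released explicitly further below.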
    blockRecoveryMessages();
    CellularCluster cluster = resolveCellularCluster(nodes, startFrom);
    Ignite orig = cluster.orig;
    Ignite failed = cluster.failed;
    List<Ignite> brokenCellNodes = cluster.brokenCellNodes;
    List<Ignite> aliveCellNodes = cluster.aliveCellNodes;
    List<Integer> partKeys = new ArrayList<>();
    List<Integer> replKeys = new ArrayList<>();
    for (Ignite node : G.allGrids()) {
        if (!node.configuration().isClientMode()) {
            partKeys.add(primaryKey(node.getOrCreateCache(PART_CACHE_NAME)));
            replKeys.add(primaryKey(node.getOrCreateCache(REPL_CACHE_NAME)));
        }
    }
    CountDownLatch partPreparedLatch = new CountDownLatch(nodes);
    CountDownLatch replPreparedLatch = new CountDownLatch(nodes);
    CountDownLatch partCommitLatch = new CountDownLatch(1);
    CountDownLatch replCommitLatch = new CountDownLatch(1);
    AtomicInteger partKeyIdx = new AtomicInteger();
    AtomicInteger replKeyIdx = new AtomicInteger();
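    // Near tx versions are collected concurrently by the prepare threads below and used later to selectively unblock recovery messages.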
    Set<GridCacheVersion> partTxVers = new GridConcurrentHashSet<>();
    Set<GridCacheVersion> replTxVers = new GridConcurrentHashSet<>();
    IgniteInternalFuture<?> partFut = multithreadedAsync(() -> {
        try {
            int idx = partKeyIdx.getAndIncrement();
            Transaction tx = orig.transactions().txStart();
            partTxVers.add(((TransactionProxyImpl<?, ?>) tx).tx().nearXidVersion());
            int key = partKeys.get(idx);
            orig.getOrCreateCache(PART_CACHE_NAME).put(key, key);
            ((TransactionProxyImpl<?, ?>) tx).tx().prepare(true);
            partPreparedLatch.countDown();
            partCommitLatch.await();
            if (orig != failed)
                ((TransactionProxyImpl<?, ?>) tx).commit();
        } catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    }, nodes);
    IgniteInternalFuture<?> replFut = multithreadedAsync(() -> {
        try {
            int idx = replKeyIdx.getAndIncrement();
            Transaction tx = orig.transactions().txStart();
            replTxVers.add(((TransactionProxyImpl<?, ?>) tx).tx().nearXidVersion());
            int key = replKeys.get(idx);
            orig.getOrCreateCache(REPL_CACHE_NAME).put(key, key);
            ((TransactionProxyImpl<?, ?>) tx).tx().prepare(true);
            replPreparedLatch.countDown();
            replCommitLatch.await();
            if (orig != failed)
                ((TransactionProxyImpl<?, ?>) tx).commit();
        } catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    }, nodes);
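    // Wait until all transactions are prepared on both caches.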
    partPreparedLatch.await();
    replPreparedLatch.await();
    checkTransactionsCount(orig, nodes, brokenCellNodes, nodes / 2, aliveCellNodes, nodes / 2, partTxVers);
    checkTransactionsCount(orig, nodes, brokenCellNodes, nodes, aliveCellNodes, nodes, replTxVers);
    assertFalse(lsnrAny.check());
    listeningLog.registerListener(lsnrAliveCell);
    // Stopping node.
    failed.close();
    awaitForSwitchOnNodeLeft(failed);
    // In case the originating node failed, all alive primaries will recover (commit) txs on tx coordinator failure.
    // Txs with failed primary will start recovery, but can't finish it since recovery messages are blocked.
    // Broken cell's nodes will have 1 unrecovered tx for partitioned cache.
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, nodes,
        brokenCellNodes, orig == failed ? 1 : nodes / 2,
        aliveCellNodes, orig == failed ? 0 : nodes / 2,
        partTxVers);
    // All cell's nodes will have 1 unrecovered tx for replicated cache.
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, nodes,
        brokenCellNodes, orig == failed ? 1 : nodes,
        aliveCellNodes, orig == failed ? 1 : nodes,
        replTxVers);
    // Counts tx's creations and preparations.
    BiConsumer<T2<Ignite, String>, T3<CountDownLatch, CountDownLatch, CountDownLatch>> txRun =
        (T2<Ignite, String> pair, T3<CountDownLatch /*create*/, CountDownLatch /*put*/, CountDownLatch /*commit*/> latches) -> {
        try {
            Ignite ignite = pair.get1();
            String cacheName = pair.get2();
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(cacheName);
            try (Transaction tx = ignite.transactions().txStart()) {
                // Create.
                latches.get1().countDown();
                // Avoiding intersection with prepared keys.
                cache.put(primaryKeys(cache, 1, 1_000).get(0), 42);
                // Put.
                latches.get2().countDown();
                tx.commit();
                // Commit.
                latches.get3().countDown();
            }
        } catch (Exception e) {
            fail("Should not happen [exception=" + e + "]");
        }
    };
    CountDownLatch partBrokenCellCreateLatch = new CountDownLatch(brokenCellNodes.size());
    CountDownLatch partBrokenCellPutLatch = new CountDownLatch(brokenCellNodes.size());
    CountDownLatch partBrokenCellCommitLatch = new CountDownLatch(brokenCellNodes.size());
    CountDownLatch partAliveCellCreateLatch = new CountDownLatch(aliveCellNodes.size());
    CountDownLatch partAliveCellPutLatch = new CountDownLatch(aliveCellNodes.size());
    CountDownLatch partAliveCellCommitLatch = new CountDownLatch(aliveCellNodes.size());
    CountDownLatch replBrokenCellCreateLatch = new CountDownLatch(brokenCellNodes.size());
    CountDownLatch replBrokenCellPutLatch = new CountDownLatch(brokenCellNodes.size());
    CountDownLatch replBrokenCellCommitLatch = new CountDownLatch(brokenCellNodes.size());
    CountDownLatch replAliveCellCreateLatch = new CountDownLatch(aliveCellNodes.size());
    CountDownLatch replAliveCellPutLatch = new CountDownLatch(aliveCellNodes.size());
    CountDownLatch replAliveCellCommitLatch = new CountDownLatch(aliveCellNodes.size());
    List<IgniteInternalFuture<?>> futs = new ArrayList<>();
    for (Ignite brokenCellNode : brokenCellNodes) {
        futs.add(multithreadedAsync(() -> txRun.accept(new T2<>(brokenCellNode, REPL_CACHE_NAME), new T3<>(replBrokenCellCreateLatch, replBrokenCellPutLatch, replBrokenCellCommitLatch)), 1));
        futs.add(multithreadedAsync(() -> txRun.accept(new T2<>(brokenCellNode, PART_CACHE_NAME), new T3<>(partBrokenCellCreateLatch, partBrokenCellPutLatch, partBrokenCellCommitLatch)), 1));
    }
    for (Ignite aliveCellNode : aliveCellNodes) {
        futs.add(multithreadedAsync(() -> txRun.accept(new T2<>(aliveCellNode, REPL_CACHE_NAME), new T3<>(replAliveCellCreateLatch, replAliveCellPutLatch, replAliveCellCommitLatch)), 1));
        futs.add(multithreadedAsync(() -> txRun.accept(new T2<>(aliveCellNode, PART_CACHE_NAME), new T3<>(partAliveCellCreateLatch, partAliveCellPutLatch, partAliveCellCommitLatch)), 1));
    }
    // Switch in progress cluster-wide.
    // Alive nodes' switch is blocked until replicated caches recovery happens.
    checkUpcomingTransactionsState(
        partBrokenCellCreateLatch, 0, // Started.
        partBrokenCellPutLatch, brokenCellNodes.size(),
        partBrokenCellCommitLatch, brokenCellNodes.size(),
        partAliveCellCreateLatch, 0, // Started. Blocked by replicated cache recovery.
        partAliveCellPutLatch, aliveCellNodes.size(),
        partAliveCellCommitLatch, aliveCellNodes.size());
    checkUpcomingTransactionsState(
        replBrokenCellCreateLatch, 0, // Started.
        replBrokenCellPutLatch, brokenCellNodes.size(),
        replBrokenCellCommitLatch, brokenCellNodes.size(),
        replAliveCellCreateLatch, 0, // Started. Blocked by replicated cache recovery.
        replAliveCellPutLatch, aliveCellNodes.size(),
        replAliveCellCommitLatch, aliveCellNodes.size());
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, nodes,
        brokenCellNodes, orig == failed ? 1 : nodes / 2,
        aliveCellNodes, orig == failed ? 0 : nodes / 2,
        partTxVers);
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, nodes,
        brokenCellNodes, orig == failed ? 1 : nodes,
        aliveCellNodes, orig == failed ? 1 : nodes,
        replTxVers);
    // Replicated recovery.
    for (Ignite ignite : G.allGrids()) {
        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) ignite.configuration().getCommunicationSpi();
        spi.stopBlock(true, blockedMsg -> {
            Message msg = blockedMsg.ioMessage().message();
            return replTxVers.contains(((GridCacheTxRecoveryRequest) msg).nearXidVersion());
        });
    }
    replCommitLatch.countDown();
    replFut.get();
    // Switch partially finished.
    // Broken cell still in switch.
    // Alive cell finished the switch.
    checkUpcomingTransactionsState(
        partBrokenCellCreateLatch, 0, // Started.
        partBrokenCellPutLatch, brokenCellNodes.size(),
        partBrokenCellCommitLatch, brokenCellNodes.size(),
        partAliveCellCreateLatch, 0, // Started.
        partAliveCellPutLatch, 0, // Alive cell's nodes are able to start transactions on primaries,
        partAliveCellCommitLatch, 0); // and able to commit, since all primaries and backups are inside the alive cell.
    checkUpcomingTransactionsState(
        replBrokenCellCreateLatch, 0, // Started.
        replBrokenCellPutLatch, brokenCellNodes.size(),
        replBrokenCellCommitLatch, brokenCellNodes.size(),
        replAliveCellCreateLatch, 0, // Started.
        replAliveCellPutLatch, 0, // Alive cell's nodes are able to start transactions on primaries,
        replAliveCellCommitLatch, aliveCellNodes.size()); // but not able to commit, since broken cell's nodes are still in switch.
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, nodes,
        brokenCellNodes, orig == failed ? 1 : nodes / 2,
        aliveCellNodes, orig == failed ? 0 : nodes / 2, // New txs able to start while previous are in progress.
        partTxVers /*to be committed*/);
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, 0,
        brokenCellNodes, 0,
        aliveCellNodes, 0,
        replTxVers);
    // Recovery finished on alive cell.
    assertTrue(waitForCondition(lsnrAliveCell::check, 5000));
    listeningLog.registerListener(lsnrBrokenCell);
    // Partitioned recovery.
    for (Ignite ignite : G.allGrids()) {
        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) ignite.configuration().getCommunicationSpi();
        spi.stopBlock(true, blockedMsg -> {
            Message msg = blockedMsg.ioMessage().message();
            return partTxVers.contains(((GridCacheTxRecoveryRequest) msg).nearXidVersion());
        });
    }
    partCommitLatch.countDown();
    partFut.get();
    // Switches finished cluster-wide, all transactions can be committed.
    checkUpcomingTransactionsState(replBrokenCellCreateLatch, 0, replBrokenCellPutLatch, 0, replBrokenCellCommitLatch, 0, replAliveCellCreateLatch, 0, replAliveCellPutLatch, 0, replAliveCellCommitLatch, 0);
    checkUpcomingTransactionsState(partBrokenCellCreateLatch, 0, partBrokenCellPutLatch, 0, partBrokenCellCommitLatch, 0, partAliveCellCreateLatch, 0, partAliveCellPutLatch, 0, partAliveCellCommitLatch, 0);
    // Check that pre-failure transactions are absent.
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, 0,
        brokenCellNodes, 0,
        aliveCellNodes, 0,
        partTxVers);
    checkTransactionsCount(
        orig != failed ? orig : null /*stopped*/, 0,
        brokenCellNodes, 0,
        aliveCellNodes, 0,
        replTxVers);
    // Recovery finished on broken cell.
    assertTrue(waitForCondition(lsnrBrokenCell::check, 5000));
    for (IgniteInternalFuture<?> fut : futs) fut.get();
    for (Ignite node : G.allGrids()) {
        for (int key : partKeys) assertEquals(key, node.getOrCreateCache(PART_CACHE_NAME).get(key));
        for (int key : replKeys) assertEquals(key, node.getOrCreateCache(REPL_CACHE_NAME).get(key));
    }
    // Final check that no transactions remain.
    checkTransactionsCount(null, 0, brokenCellNodes, 0, aliveCellNodes, 0, null);
}
Also used : Message(org.apache.ignite.plugin.extensions.communication.Message) ArrayList(java.util.ArrayList) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) Ignite(org.apache.ignite.Ignite) T2(org.apache.ignite.internal.util.typedef.T2) T3(org.apache.ignite.internal.util.typedef.T3) LogListener(org.apache.ignite.testframework.LogListener) IgniteCache(org.apache.ignite.IgniteCache) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestRecordingCommunicationSpi(org.apache.ignite.internal.TestRecordingCommunicationSpi) Transaction(org.apache.ignite.transactions.Transaction) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionProxyImpl(org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl) Test(org.junit.Test)
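
The tests in this listing share one pattern: a GridConcurrentHashSet is filled from several worker threads and read only after those workers have been joined or their futures completed. The following is a minimal, self-contained sketch of that pattern; it is a sketch only, assuming ignite-core on the classpath, and the class name and values are illustrative rather than taken from the tests above.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import org.apache.ignite.internal.util.GridConcurrentHashSet;

public class GridConcurrentHashSetSketch {
    public static void main(String[] args) throws InterruptedException {
        // GridConcurrentHashSet implements java.util.Set with thread-safe add/contains/size.
        Set<Integer> seen = new GridConcurrentHashSet<>();

        List<Thread> workers = new ArrayList<>();

        for (int t = 0; t < 4; t++) {
            final int base = t * 1_000;

            Thread w = new Thread(() -> {
                // Concurrent adds from several threads are safe; duplicates are silently ignored.
                for (int i = 0; i < 100; i++)
                    seen.add(base + i);
            });

            workers.add(w);
            w.start();
        }

        for (Thread w : workers)
            w.join();

        // After join() all 400 distinct values are visible to the reading thread.
        System.out.println("Collected " + seen.size() + " distinct values.");
    }
}

Since the class lives in Ignite's internal util package, application code would normally use java.util.concurrent.ConcurrentHashMap.newKeySet() instead; the tests above rely on GridConcurrentHashSet simply because it is already available inside the ignite-core module.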

Example 37 with GridConcurrentHashSet

use of org.apache.ignite.internal.util.GridConcurrentHashSet in project ignite by apache.

the class TxPessimisticDeadlockDetectionTest method doTestDeadlock.

/**
 * @throws Exception If failed.
 */
private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst, final boolean clientTx, final Object startKey) throws Exception {
    log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst + ", clientTx=" + clientTx + ", startKey=" + startKey.getClass().getName() + ']');
    final AtomicInteger threadCnt = new AtomicInteger();
    final CyclicBarrier barrier = new CyclicBarrier(txCnt);
    final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
    final List<List<Object>> keySets = generateKeys(txCnt, startKey, loc, !lockPrimaryFirst);
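    // Keys and transactions touched by the worker threads are accumulated in concurrent sets and checked against the deadlock report below.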
    final Set<Object> involvedKeys = new GridConcurrentHashSet<>();
    final Set<Object> involvedLockedKeys = new GridConcurrentHashSet<>();
    final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {

        @Override
        public void run() {
            int threadNum = threadCnt.incrementAndGet();
            Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
            IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME).withAllowAtomicOpsInTx();
            List<Object> keys = keySets.get(threadNum - 1);
            int txTimeout = 500 + txCnt * 100;
            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                involvedTxs.add(((TransactionProxyImpl) tx).tx());
                Object key = keys.get(0);
                involvedKeys.add(key);
                Object k;
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() + ", tx=" + tx + ", key=" + key + ']');
                cache.put(key, 0);
                involvedLockedKeys.add(key);
                barrier.await();
                key = keys.get(1);
                ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                List<Object> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, incrementKey(key, 100 * threadNum));
                Map<Object, Integer> entries = new HashMap<>();
                involvedKeys.add(key);
                entries.put(key, 0);
                for (Object o : primaryKeys) {
                    involvedKeys.add(o);
                    entries.put(o, 1);
                    k = incrementKey(o, +13);
                    involvedKeys.add(k);
                    entries.put(k, 2);
                }
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() + ", tx=" + tx + ", entries=" + entries + ']');
                cache.putAll(entries);
                tx.commit();
            } catch (Throwable e) {
                // At least one stack trace should contain TransactionDeadlockException.
                if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                    if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class)))
                        U.error(log, "At least one stack trace should contain " + TransactionDeadlockException.class.getSimpleName(), e);
                }
            }
        }
    }, loc ? 2 : txCnt, "tx-thread");
    try {
        fut.get();
    } catch (IgniteCheckedException e) {
        U.error(null, "Unexpected exception", e);
        fail();
    }
    U.sleep(1000);
    TransactionDeadlockException deadlockE = deadlockErr.get();
    assertNotNull(deadlockE);
    checkAllTransactionsCompleted(involvedKeys, NODES_CNT * 2, CACHE_NAME);
    // Check deadlock report
    String msg = deadlockE.getMessage();
    for (IgniteInternalTx tx : involvedTxs) assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() + ", threadId=" + tx.threadId() + ']'));
    for (Object key : involvedKeys) {
        if (involvedLockedKeys.contains(key))
            assertTrue(msg.contains("[key=" + key + ", cache=" + CACHE_NAME + ']'));
        else
            assertFalse(msg.contains("[key=" + key));
    }
}
Also used : TransactionDeadlockException(org.apache.ignite.transactions.TransactionDeadlockException) GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) ArrayList(java.util.ArrayList) List(java.util.List) Ignite(org.apache.ignite.Ignite) ClusterNode(org.apache.ignite.cluster.ClusterNode) IgniteKernal(org.apache.ignite.internal.IgniteKernal) IgniteCache(org.apache.ignite.IgniteCache) AtomicReference(java.util.concurrent.atomic.AtomicReference) CyclicBarrier(java.util.concurrent.CyclicBarrier) Transaction(org.apache.ignite.transactions.Transaction) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionTimeoutException(org.apache.ignite.transactions.TransactionTimeoutException) HashMap(java.util.HashMap) Map(java.util.Map)

Example 38 with GridConcurrentHashSet

use of org.apache.ignite.internal.util.GridConcurrentHashSet in project ignite by apache.

the class IgniteClusterActivateDeactivateTestWithPersistence method testDeactivateDuringEvictionAndRebalance.

/**
 * Test that after deactivation during eviction and rebalance, followed by activation again,
 * all data in the cache is consistent.
 *
 * @throws Exception If failed.
 */
@Test
public void testDeactivateDuringEvictionAndRebalance() throws Exception {
    Assume.assumeFalse("https://issues.apache.org/jira/browse/IGNITE-7384", MvccFeatureChecker.forcedMvcc());
    IgniteEx srv = startGrids(3);
    srv.cluster().state(ACTIVE);
    CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME)
        .setBackups(1)
        .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
        .setIndexedTypes(Integer.class, Integer.class)
        .setAffinity(new RendezvousAffinityFunction(false, 64))
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
    IgniteCache cache = srv.createCache(ccfg);
    // High number of keys triggers long partition eviction.
    final int keysCount = 100_000;
    try (IgniteDataStreamer ds = srv.dataStreamer(DEFAULT_CACHE_NAME)) {
        log.info("Writing initial data...");
        ds.allowOverwrite(true);
        for (int k = 1; k <= keysCount; k++) {
            ds.addData(k, k);
            if (k % 50_000 == 0)
                log.info("Written " + k + " entities.");
        }
        log.info("Writing initial data finished.");
    }
    AtomicInteger keyCounter = new AtomicInteger(keysCount);
    AtomicBoolean stop = new AtomicBoolean(false);
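    // Keys written by the concurrent loaders are tracked in a thread-safe set for the consistency check after reactivation.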
    Set<Integer> addedKeys = new GridConcurrentHashSet<>();
    IgniteInternalFuture cacheLoadFuture = GridTestUtils.runMultiThreadedAsync(() -> {
        while (!stop.get()) {
            int key = keyCounter.incrementAndGet();
            try {
                cache.put(key, key);
                addedKeys.add(key);
                Thread.sleep(10);
            } catch (Exception ignored) {
            }
        }
    }, 2, "cache-load");
    stopGrid(2);
    // Wait for some data.
    Thread.sleep(3000);
    startGrid(2);
    log.info("Stop load...");
    stop.set(true);
    cacheLoadFuture.get();
    // Deactivate and activate again.
    srv.cluster().state(INACTIVE);
    srv.cluster().state(ACTIVE);
    awaitPartitionMapExchange();
    log.info("Checking data...");
    for (Ignite ignite : G.allGrids()) {
        IgniteCache cache1 = ignite.getOrCreateCache(DEFAULT_CACHE_NAME);
        for (int k = 1; k <= keysCount; k++) {
            Object val = cache1.get(k);
            Assert.assertNotNull("node=" + ignite.name() + ", key=" + k, val);
            Assert.assertTrue("node=" + ignite.name() + ", key=" + k + ", val=" + val, (int) val == k);
        }
        for (int k : addedKeys) {
            Object val = cache1.get(k);
            Assert.assertNotNull("node=" + ignite.name() + ", key=" + k, val);
            Assert.assertTrue("node=" + ignite.name() + ", key=" + k + ", val=" + val, (int) val == k);
        }
    }
}
Also used : IgniteCache(org.apache.ignite.IgniteCache) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) IgniteSpiException(org.apache.ignite.spi.IgniteSpiException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteClusterReadOnlyException(org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) IgniteDataStreamer(org.apache.ignite.IgniteDataStreamer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) IgniteEx(org.apache.ignite.internal.IgniteEx) RendezvousAffinityFunction(org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction) Ignite(org.apache.ignite.Ignite) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) Test(org.junit.Test)

Example 39 with GridConcurrentHashSet

use of org.apache.ignite.internal.util.GridConcurrentHashSet in project ignite by apache.

the class SocketStreamerSelfTest method test.

/**
 * @param converter Converter.
 * @param delim Delimiter.
 * @param r Runnable.
 * @param oneMessagePerTuple Whether a single tuple is extracted from each message.
 * @throws Exception If failed.
 */
private void test(@Nullable SocketMessageConverter<Message> converter, @Nullable byte[] delim, Runnable r, boolean oneMessagePerTuple) throws Exception {
    SocketStreamer<Message, Integer, String> sockStmr = null;
    Ignite ignite = grid(0);
    IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);
    cache.clear();
    try (IgniteDataStreamer<Integer, String> stmr = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        stmr.allowOverwrite(true);
        stmr.autoFlushFrequency(10);
        sockStmr = new SocketStreamer<>();
        sockStmr.setIgnite(ignite);
        sockStmr.setStreamer(stmr);
        sockStmr.setPort(port);
        sockStmr.setDelimiter(delim);
        if (oneMessagePerTuple) {
            sockStmr.setSingleTupleExtractor(new StreamSingleTupleExtractor<Message, Integer, String>() {

                @Override
                public Map.Entry<Integer, String> extract(Message msg) {
                    return new IgniteBiTuple<>(msg.key, msg.val);
                }
            });
        } else {
            sockStmr.setMultipleTupleExtractor(new StreamMultipleTupleExtractor<Message, Integer, String>() {

                @Override
                public Map<Integer, String> extract(Message msg) {
                    Map<Integer, String> answer = new HashMap<>();
                    for (int value : msg.values) {
                        answer.put(value, Integer.toString(value));
                    }
                    return answer;
                }
            });
        }
        if (converter != null)
            sockStmr.setConverter(converter);
        final CountDownLatch latch = new CountDownLatch(CNT);
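        // Cache events delivered by the remote listener are collected concurrently; the set is read only for diagnostics below.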
        final GridConcurrentHashSet<CacheEvent> evts = new GridConcurrentHashSet<>();
        IgniteBiPredicate<UUID, CacheEvent> locLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {

            @Override
            public boolean apply(UUID uuid, CacheEvent evt) {
                evts.add(evt);
                latch.countDown();
                return true;
            }
        };
        ignite.events(ignite.cluster().forCacheNodes(DEFAULT_CACHE_NAME)).remoteListen(locLsnr, null, EVT_CACHE_OBJECT_PUT);
        sockStmr.start();
        r.run();
        latch.await();
        for (int i = 0; i < CNT; i++) {
            Object val = cache.get(i);
            String exp = Integer.toString(i);
            if (!exp.equals(val))
                log.error("Unexpected cache value [key=" + i + ", exp=" + exp + ", val=" + val + ", evts=" + evts + ']');
            assertEquals(exp, val);
        }
        assertEquals(CNT, cache.size(CachePeekMode.PRIMARY));
    } finally {
        if (sockStmr != null)
            sockStmr.stop();
    }
}
Also used : IgniteBiPredicate(org.apache.ignite.lang.IgniteBiPredicate) CountDownLatch(java.util.concurrent.CountDownLatch) GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) CacheEvent(org.apache.ignite.events.CacheEvent) Ignite(org.apache.ignite.Ignite) UUID(java.util.UUID) HashMap(java.util.HashMap) Map(java.util.Map)

Example 40 with GridConcurrentHashSet

use of org.apache.ignite.internal.util.GridConcurrentHashSet in project ignite by apache.

the class TcpDiscoveryPendingMessageDeliveryTest method testPendingMessagesOverflow.

/**
 * @throws Exception If failed.
 */
@Test
public void testPendingMessagesOverflow() throws Exception {
    Ignite coord = startGrid("coordinator");
    TcpDiscoverySpi coordDisco = (TcpDiscoverySpi) coord.configuration().getDiscoverySpi();
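    // Ensured discovery messages sent by the coordinator are recorded from the SPI send listener, hence the concurrent set.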
    Set<TcpDiscoveryAbstractMessage> sentEnsuredMsgs = new GridConcurrentHashSet<>();
    coordDisco.addSendMessageListener(msg -> {
        if (coordDisco.ensured(msg))
            sentEnsuredMsgs.add(msg);
    });
    // Victim doesn't send acknowledgements, so we need an intermediate node to accept messages,
    // so the coordinator could mark them as pending.
    Ignite mediator = startGrid("mediator");
    Ignite victim = startGrid("victim");
    startGrid("listener");
    sentEnsuredMsgs.clear();
    receivedEnsuredMsgs.clear();
    // Initial custom message will travel across the ring and will be discarded.
    sendDummyCustomMessage(coordDisco, IgniteUuid.randomUuid());
    assertTrue("Sent: " + sentEnsuredMsgs + "; received: " + receivedEnsuredMsgs, GridTestUtils.waitForCondition(() -> {
        log.info("Waiting for messages delivery");
        return receivedEnsuredMsgs.equals(sentEnsuredMsgs);
    }, 10000));
    blockMsgs = true;
    log.info("Sending dummy custom messages");
    // Non-discarded messages shouldn't be dropped from the queue.
    int msgsNum = 2000;
    for (int i = 0; i < msgsNum; i++) sendDummyCustomMessage(coordDisco, IgniteUuid.randomUuid());
    mediator.close();
    victim.close();
    assertTrue("Sent: " + sentEnsuredMsgs + "; received: " + receivedEnsuredMsgs, GridTestUtils.waitForCondition(() -> {
        log.info("Waiting for messages delivery [sentSize=" + sentEnsuredMsgs.size() + ", rcvdSize=" + receivedEnsuredMsgs.size() + ']');
        return receivedEnsuredMsgs.equals(sentEnsuredMsgs);
    }, 10000));
}
Also used : GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) TcpDiscoveryAbstractMessage(org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryAbstractMessage) Ignite(org.apache.ignite.Ignite) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)

Aggregations

GridConcurrentHashSet (org.apache.ignite.internal.util.GridConcurrentHashSet): 42 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 22 usages
Ignite (org.apache.ignite.Ignite): 21 usages
Test (org.junit.Test): 21 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 16 usages
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 16 usages
IgniteException (org.apache.ignite.IgniteException): 15 usages
IOException (java.io.IOException): 12 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 12 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 10 usages
UUID (java.util.UUID): 9 usages
IgniteCache (org.apache.ignite.IgniteCache): 9 usages
ArrayList (java.util.ArrayList): 8 usages
List (java.util.List): 8 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8 usages
IgniteInterruptedException (org.apache.ignite.IgniteInterruptedException): 8 usages
IgniteLock (org.apache.ignite.IgniteLock): 8 usages
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 8 usages
ExpectedException (org.junit.rules.ExpectedException): 8 usages
Transaction (org.apache.ignite.transactions.Transaction): 7 usages