
Example 11 with T2

Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method testLeftPrimaryAndBackupNodes.

/**
 * @throws Exception If failed.
 */
public void testLeftPrimaryAndBackupNodes() throws Exception {
    if (cacheMode() == REPLICATED)
        return;
    this.backups = 1;
    final int SRV_NODES = 3;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    final Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    final CacheEventListener3 lsnr = asyncCallback() ? new CacheEventAsyncListener3() : new CacheEventListener3();
    qry.setLocalListener(lsnr);
    qry.setRemoteFilter(lsnr);
    IgniteCache<Object, Object> clnCache = qryClient.cache(DEFAULT_CACHE_NAME);
    QueryCursor<Cache.Entry<Object, Object>> qryCur = clnCache.query(qry);
    Ignite igniteSrv = ignite(0);
    IgniteCache<Object, Object> srvCache = igniteSrv.cache(DEFAULT_CACHE_NAME);
    Affinity<Object> aff = affinity(srvCache);
    List<Integer> keys = testKeys(srvCache, 1);
    Collection<ClusterNode> nodes = aff.mapPartitionToPrimaryAndBackups(keys.get(0));
    Collection<UUID> ids = F.transform(nodes, new C1<ClusterNode, UUID>() {

        @Override
        public UUID apply(ClusterNode node) {
            return node.id();
        }
    });
    int keyIter = 0;
    boolean filtered = false;
    Map<Object, T2<Object, Object>> updates = new HashMap<>();
    final List<T3<Object, Object, Object>> expEvts = new ArrayList<>();
    for (; keyIter < keys.size() / 2; keyIter++) {
        int key = keys.get(keyIter);
        log.info("Put [key=" + key + ", part=" + aff.partition(key) + ", filtered=" + filtered + ']');
        T2<Object, Object> t = updates.get(key);
        Integer val = filtered ? (key % 2 == 0 ? key + 1 : key) : key * 2;
        if (t == null) {
            updates.put(key, new T2<>((Object) val, null));
            if (!filtered)
                expEvts.add(new T3<>((Object) key, (Object) val, null));
        } else {
            updates.put(key, new T2<>((Object) val, (Object) key));
            if (!filtered)
                expEvts.add(new T3<>((Object) key, (Object) val, (Object) key));
        }
        srvCache.put(key, val);
        filtered = !filtered;
    }
    checkEvents(expEvts, lsnr, false);
    List<Thread> stopThreads = new ArrayList<>(3);
    // Stop nodes that own this partition.
    for (int i = 0; i < SRV_NODES; i++) {
        Ignite ignite = ignite(i);
        if (ids.contains(ignite.cluster().localNode().id())) {
            final int i0 = i;
            TestCommunicationSpi spi = (TestCommunicationSpi) ignite.configuration().getCommunicationSpi();
            spi.skipAllMsg = true;
            stopThreads.add(new Thread() {

                @Override
                public void run() {
                    stopGrid(i0, true);
                }
            });
        }
    }
    // Stop and join threads.
    for (Thread t : stopThreads) t.start();
    for (Thread t : stopThreads) t.join();
    assert GridTestUtils.waitForCondition(new PA() {

        @Override
        public boolean apply() {
            // (SRV_NODES + 1 client node) - 1 primary - backup nodes.
            return qryClient.cluster().nodes().size() == (SRV_NODES + 1) - 1 /* primary node */ - backups;
        }
    }, 5000L);
    for (; keyIter < keys.size(); keyIter++) {
        int key = keys.get(keyIter);
        log.info("Put [key=" + key + ", filtered=" + filtered + ']');
        T2<Object, Object> t = updates.get(key);
        Integer val = filtered ? (key % 2 == 0 ? key + 1 : key) : key * 2;
        if (t == null) {
            updates.put(key, new T2<>((Object) val, null));
            if (!filtered)
                expEvts.add(new T3<>((Object) key, (Object) val, null));
        } else {
            updates.put(key, new T2<>((Object) val, (Object) key));
            if (!filtered)
                expEvts.add(new T3<>((Object) key, (Object) val, (Object) key));
        }
        clnCache.put(key, val);
        filtered = !filtered;
    }
    checkEvents(expEvts, lsnr, false);
    qryCur.close();
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) MutableEntry(javax.cache.processor.MutableEntry) ContinuousQuery(org.apache.ignite.cache.query.ContinuousQuery) Ignite(org.apache.ignite.Ignite) UUID(java.util.UUID) T2(org.apache.ignite.internal.util.typedef.T2) T3(org.apache.ignite.internal.util.typedef.T3) ClusterNode(org.apache.ignite.cluster.ClusterNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PA(org.apache.ignite.internal.util.typedef.PA)
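
The updates map and expEvts list in this test lean on Ignite's small tuple typedefs: a T2 holds a (new value, previous value) pair and a T3 holds a (key, new value, previous value) triple. The standalone sketch below only illustrates that bookkeeping pattern and is not the test's own checkEvents logic; the accessors get1() and get2() are shown elsewhere on this page, while get3() is assumed to follow the same naming.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.internal.util.typedef.T3;

public class TupleBookkeepingSketch {
    public static void main(String[] args) {
        // Latest (newVal, prevVal) pair per key, as in the test's 'updates' map.
        Map<Object, T2<Object, Object>> updates = new HashMap<>();

        // Expected events as (key, newVal, prevVal) triples, as in 'expEvts'.
        List<T3<Object, Object, Object>> expEvts = new ArrayList<>();

        for (int key = 0; key < 3; key++) {
            for (int val : new int[] {key * 2, key * 2 + 1}) {
                T2<Object, Object> prev = updates.get(key);

                if (prev == null) {
                    // First update for this key: no previous value.
                    updates.put(key, new T2<>((Object)val, null));
                    expEvts.add(new T3<>((Object)key, (Object)val, null));
                }
                else {
                    // Later update: the previous value comes from the stored pair.
                    updates.put(key, new T2<>((Object)val, prev.get1()));
                    expEvts.add(new T3<>((Object)key, (Object)val, prev.get1()));
                }
            }
        }

        // Accessor names for T3 are assumed to mirror T2's get1()/get2().
        for (T3<Object, Object, Object> evt : expEvts)
            System.out.println("key=" + evt.get1() + ", newVal=" + evt.get2() + ", prevVal=" + evt.get3());
    }
}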

Example 12 with T2

Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method failoverStartStopFilter.

/**
 * @param backups Number of backups.
 * @throws Exception If failed.
 */
private void failoverStartStopFilter(int backups) throws Exception {
    this.backups = backups;
    final int SRV_NODES = 4;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> qryClnCache = qryClient.cache(DEFAULT_CACHE_NAME);
    final CacheEventListener2 lsnr = new CacheEventListener2();
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    qry.setRemoteFilter(asyncCallback() ? new CacheEventAsyncFilter() : new CacheEventFilter());
    QueryCursor<?> cur = qryClnCache.query(qry);
    CacheEventListener2 dinLsnr = null;
    QueryCursor<?> dinQry = null;
    final AtomicBoolean stop = new AtomicBoolean();
    final AtomicReference<CountDownLatch> checkLatch = new AtomicReference<>();
    IgniteInternalFuture<?> restartFut = GridTestUtils.runAsync(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            while (!stop.get() && !err) {
                final int idx = ThreadLocalRandom.current().nextInt(SRV_NODES - 1);
                log.info("Stop node: " + idx);
                awaitPartitionMapExchange();
                Thread.sleep(400);
                stopGrid(idx);
                awaitPartitionMapExchange();
                Thread.sleep(400);
                log.info("Start node: " + idx);
                startGrid(idx);
                Thread.sleep(200);
                CountDownLatch latch = new CountDownLatch(1);
                assertTrue(checkLatch.compareAndSet(null, latch));
                if (!stop.get()) {
                    log.info("Wait for event check.");
                    assertTrue(latch.await(1, MINUTES));
                }
            }
            return null;
        }
    });
    final Map<Integer, Integer> vals = new HashMap<>();
    final Map<Integer, List<T2<Integer, Integer>>> expEvts = new HashMap<>();
    final List<T3<Object, Object, Object>> expEvtsNewLsnr = new ArrayList<>();
    final List<T3<Object, Object, Object>> expEvtsLsnr = new ArrayList<>();
    try {
        long stopTime = System.currentTimeMillis() + 60_000;
        // Start a new filter every 5 sec.
        long startFilterTime = System.currentTimeMillis() + 5_000;
        final int PARTS = qryClient.affinity(DEFAULT_CACHE_NAME).partitions();
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        boolean filtered = false;
        boolean processorPut = false;
        while (System.currentTimeMillis() < stopTime) {
            Integer key = rnd.nextInt(PARTS);
            Integer prevVal = vals.get(key);
            Integer val = vals.get(key);
            if (System.currentTimeMillis() > startFilterTime) {
                // Stop filter and check events.
                if (dinQry != null) {
                    dinQry.close();
                    log.info("Continuous query listener closed. Await events: " + expEvtsNewLsnr.size());
                    checkEvents(expEvtsNewLsnr, dinLsnr, backups == 0);
                }
                dinLsnr = new CacheEventListener2();
                ContinuousQuery<Object, Object> newQry = new ContinuousQuery<>();
                newQry.setLocalListener(dinLsnr);
                newQry.setRemoteFilter(asyncCallback() ? new CacheEventAsyncFilter() : new CacheEventFilter());
                dinQry = qryClnCache.query(newQry);
                log.info("Continuous query listener started.");
                startFilterTime = System.currentTimeMillis() + 5_000;
            }
            if (val == null)
                val = 0;
            else
                val = Math.abs(val) + 1;
            if (filtered)
                val = -val;
            if (processorPut && prevVal != null) {
                qryClnCache.invoke(key, new CacheEntryProcessor<Object, Object, Void>() {

                    @Override
                    public Void process(MutableEntry<Object, Object> entry, Object... arguments) throws EntryProcessorException {
                        entry.setValue(arguments[0]);
                        return null;
                    }
                }, val);
            } else
                qryClnCache.put(key, val);
            processorPut = !processorPut;
            vals.put(key, val);
            if (val >= 0) {
                List<T2<Integer, Integer>> keyEvts = expEvts.get(key);
                if (keyEvts == null) {
                    keyEvts = new ArrayList<>();
                    expEvts.put(key, keyEvts);
                }
                keyEvts.add(new T2<>(val, prevVal));
                T3<Object, Object, Object> tupVal = new T3<>((Object) key, (Object) val, (Object) prevVal);
                expEvtsLsnr.add(tupVal);
                if (dinQry != null)
                    expEvtsNewLsnr.add(tupVal);
            }
            filtered = !filtered;
            CountDownLatch latch = checkLatch.get();
            if (latch != null) {
                log.info("Check events.");
                checkLatch.set(null);
                boolean success = false;
                try {
                    if (err)
                        break;
                    checkEvents(expEvtsLsnr, lsnr, backups == 0);
                    success = true;
                    log.info("Events checked.");
                } finally {
                    if (!success)
                        err = true;
                    latch.countDown();
                }
            }
        }
    } finally {
        stop.set(true);
    }
    CountDownLatch latch = checkLatch.get();
    if (latch != null)
        latch.countDown();
    restartFut.get();
    checkEvents(expEvtsLsnr, lsnr, backups == 0);
    lsnr.evts.clear();
    lsnr.vals.clear();
    if (dinQry != null) {
        checkEvents(expEvtsNewLsnr, dinLsnr, backups == 0);
        dinLsnr.evts.clear();
        dinLsnr.vals.clear();
    }
    List<T3<Object, Object, Object>> afterRestEvts = new ArrayList<>();
    for (int i = 0; i < qryClient.affinity(DEFAULT_CACHE_NAME).partitions(); i++) {
        Integer oldVal = (Integer) qryClnCache.get(i);
        qryClnCache.put(i, i);
        afterRestEvts.add(new T3<>((Object) i, (Object) i, (Object) oldVal));
    }
    checkEvents(new ArrayList<>(afterRestEvts), lsnr, false);
    cur.close();
    if (dinQry != null) {
        checkEvents(new ArrayList<>(afterRestEvts), dinLsnr, false);
        dinQry.close();
    }
    assertFalse("Unexpected error during test, see log for details.", err);
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) ContinuousQuery(org.apache.ignite.cache.query.ContinuousQuery) EntryProcessorException(javax.cache.processor.EntryProcessorException) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) Ignite(org.apache.ignite.Ignite) List(java.util.List) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) T2(org.apache.ignite.internal.util.typedef.T2) T3(org.apache.ignite.internal.util.typedef.T3) AtomicReference(java.util.concurrent.atomic.AtomicReference) CountDownLatch(java.util.concurrent.CountDownLatch) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) CacheEntryListenerException(javax.cache.event.CacheEntryListenerException) IgniteSpiException(org.apache.ignite.spi.IgniteSpiException) ClusterTopologyException(org.apache.ignite.cluster.ClusterTopologyException) EntryProcessorException(javax.cache.processor.EntryProcessorException) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) CacheException(javax.cache.CacheException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)
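
The restart task and the update loop above hand off through an AtomicReference<CountDownLatch>: after each stop/start cycle the restarter publishes a fresh latch and waits, and the update loop, once it notices the latch, verifies the accumulated events and counts it down. The sketch below strips that handshake down to plain Java threads with no Ignite involved; the actual verification is left as a comment because checkEvents is not shown on this page.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class CheckLatchHandshakeSketch {
    public static void main(String[] args) throws Exception {
        final AtomicBoolean stop = new AtomicBoolean();
        final AtomicReference<CountDownLatch> checkLatch = new AtomicReference<>();

        // "Restarter": after each simulated stop/start cycle it publishes a latch
        // and waits until the update loop has finished its check.
        Thread restarter = new Thread(() -> {
            try {
                while (!stop.get()) {
                    Thread.sleep(100); // Stand-in for stopGrid(idx) + startGrid(idx).

                    CountDownLatch latch = new CountDownLatch(1);

                    if (!checkLatch.compareAndSet(null, latch))
                        throw new IllegalStateException("Previous check did not finish.");

                    if (!stop.get() && !latch.await(1, TimeUnit.MINUTES))
                        throw new IllegalStateException("Check timed out.");
                }
            }
            catch (InterruptedException ignored) {
                // No-op.
            }
        });

        restarter.start();

        long stopTime = System.currentTimeMillis() + 1_000;

        // Update loop: does work and, whenever a latch shows up, runs the check and releases the restarter.
        while (System.currentTimeMillis() < stopTime) {
            Thread.sleep(10); // Stand-in for a cache update plus expected-event bookkeeping.

            CountDownLatch latch = checkLatch.get();

            if (latch != null) {
                checkLatch.set(null);

                // ... verify expected events here (checkEvents in the real test) ...

                latch.countDown();
            }
        }

        stop.set(true);

        // Release the restarter if it published a latch after the loop exited.
        CountDownLatch latch = checkLatch.get();

        if (latch != null)
            latch.countDown();

        restarter.join();
    }
}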

Example 13 with T2

Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method checkBackupQueue.

/**
 * @param backups Number of backups.
 * @param updateFromClient If {@code true}, executes cache updates from the client node.
 * @throws Exception If failed.
 */
private void checkBackupQueue(int backups, boolean updateFromClient) throws Exception {
    this.backups = atomicityMode() == CacheAtomicityMode.ATOMIC ? backups : backups < 2 ? 2 : backups;
    final int SRV_NODES = 4;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> qryClientCache = qryClient.cache(DEFAULT_CACHE_NAME);
    Affinity<Object> aff = qryClient.affinity(DEFAULT_CACHE_NAME);
    CacheEventListener1 lsnr = asyncCallback() ? new CacheEventAsyncListener1(false) : new CacheEventListener1(false);
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    QueryCursor<?> cur = qryClientCache.query(qry);
    int PARTS = 10;
    Map<Object, T2<Object, Object>> updates = new HashMap<>();
    List<T3<Object, Object, Object>> expEvts = new ArrayList<>();
    for (int i = 0; i < (atomicityMode() == CacheAtomicityMode.ATOMIC ? SRV_NODES - 1 : SRV_NODES - 2); i++) {
        log.info("Stop iteration: " + i);
        TestCommunicationSpi spi = (TestCommunicationSpi) ignite(i).configuration().getCommunicationSpi();
        Ignite ignite = ignite(i);
        IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);
        List<Integer> keys = testKeys(cache, PARTS);
        CountDownLatch latch = new CountDownLatch(keys.size());
        lsnr.latch = latch;
        boolean first = true;
        for (Integer key : keys) {
            log.info("Put [node=" + ignite.name() + ", key=" + key + ", part=" + aff.partition(key) + ']');
            T2<Object, Object> t = updates.get(key);
            if (updateFromClient) {
                if (atomicityMode() == CacheAtomicityMode.TRANSACTIONAL) {
                    try (Transaction tx = qryClient.transactions().txStart()) {
                        qryClientCache.put(key, key);
                        tx.commit();
                    } catch (CacheException | ClusterTopologyException ignored) {
                        log.warning("Failed put. [Key=" + key + ", val=" + key + "]");
                        continue;
                    }
                } else
                    qryClientCache.put(key, key);
            } else {
                if (atomicityMode() == CacheAtomicityMode.TRANSACTIONAL) {
                    try (Transaction tx = ignite.transactions().txStart()) {
                        cache.put(key, key);
                        tx.commit();
                    } catch (CacheException | ClusterTopologyException ignored) {
                        log.warning("Failed put. [Key=" + key + ", val=" + key + "]");
                        continue;
                    }
                } else
                    cache.put(key, key);
            }
            if (t == null) {
                updates.put(key, new T2<>((Object) key, null));
                expEvts.add(new T3<>((Object) key, (Object) key, null));
            } else {
                updates.put(key, new T2<>((Object) key, (Object) key));
                expEvts.add(new T3<>((Object) key, (Object) key, (Object) key));
            }
            if (first) {
                spi.skipMsg = true;
                first = false;
            }
        }
        stopGrid(i);
        if (!latch.await(5, SECONDS)) {
            Set<Integer> keys0 = new HashSet<>(keys);
            keys0.removeAll(lsnr.keys);
            log.info("Missed events for keys: " + keys0);
            fail("Failed to wait for notifications [exp=" + keys.size() + ", left=" + lsnr.latch.getCount() + ']');
        }
        checkEvents(expEvts, lsnr);
    }
    for (int i = 0; i < (atomicityMode() == CacheAtomicityMode.ATOMIC ? SRV_NODES - 1 : SRV_NODES - 2); i++) {
        log.info("Start iteration: " + i);
        Ignite ignite = startGrid(i);
        IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);
        List<Integer> keys = testKeys(cache, PARTS);
        CountDownLatch latch = new CountDownLatch(keys.size());
        lsnr.latch = latch;
        for (Integer key : keys) {
            log.info("Put [node=" + ignite.name() + ", key=" + key + ", part=" + aff.partition(key) + ']');
            T2<Object, Object> t = updates.get(key);
            if (t == null) {
                updates.put(key, new T2<>((Object) key, null));
                expEvts.add(new T3<>((Object) key, (Object) key, null));
            } else {
                updates.put(key, new T2<>((Object) key, (Object) key));
                expEvts.add(new T3<>((Object) key, (Object) key, (Object) key));
            }
            if (updateFromClient)
                qryClientCache.put(key, key);
            else
                cache.put(key, key);
        }
        if (!latch.await(10, SECONDS)) {
            Set<Integer> keys0 = new HashSet<>(keys);
            keys0.removeAll(lsnr.keys);
            log.info("Missed events for keys: " + keys0);
            fail("Failed to wait for notifications [exp=" + keys.size() + ", left=" + lsnr.latch.getCount() + ']');
        }
        checkEvents(expEvts, lsnr);
    }
    cur.close();
    assertFalse("Unexpected error during test, see log for details.", err);
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CacheException(javax.cache.CacheException) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) ContinuousQuery(org.apache.ignite.cache.query.ContinuousQuery) Ignite(org.apache.ignite.Ignite) T2(org.apache.ignite.internal.util.typedef.T2) T3(org.apache.ignite.internal.util.typedef.T3) HashSet(java.util.HashSet) GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Transaction(org.apache.ignite.transactions.Transaction) ClusterTopologyException(org.apache.ignite.cluster.ClusterTopologyException)
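
The latch.await(...) calls above only work because the listener counts the latch down once per received event and records the key. CacheEventListener1 itself is not shown on this page, so the class below is a guessed, simplified stand-in rather than the real listener: a JCache CacheEntryUpdatedListener that collects keys (GridConcurrentHashSet is taken from the import list above) and decrements whatever latch the test has assigned; the class name is made up.

import java.io.Serializable;
import java.util.Set;
import java.util.concurrent.CountDownLatch;

import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;

import org.apache.ignite.internal.util.GridConcurrentHashSet;

/** Hypothetical stand-in for CacheEventListener1: collects keys and releases the latch per event. */
public class LatchCountingListener implements CacheEntryUpdatedListener<Object, Object>, Serializable {
    /** Latch assigned by the test before each batch of puts; volatile because it is set from the test thread. */
    public volatile CountDownLatch latch;

    /** Keys for which events were received, used to report missed keys on timeout. */
    public final Set<Object> keys = new GridConcurrentHashSet<>();

    /** {@inheritDoc} */
    @Override public void onUpdated(Iterable<CacheEntryEvent<?, ?>> evts) {
        for (CacheEntryEvent<?, ?> evt : evts) {
            keys.add(evt.getKey());

            CountDownLatch latch0 = latch;

            if (latch0 != null)
                latch0.countDown();
        }
    }
}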

Example 14 with T2

Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method testStartStopQuery.

/**
 * @throws Exception If failed.
 */
public void testStartStopQuery() throws Exception {
    this.backups = 1;
    final int SRV_NODES = 3;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    final Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> clnCache = qryClient.cache(DEFAULT_CACHE_NAME);
    IgniteOutClosure<IgniteCache<Integer, Integer>> rndCache = new IgniteOutClosure<IgniteCache<Integer, Integer>>() {

        int cnt = 0;

        @Override
        public IgniteCache<Integer, Integer> apply() {
            ++cnt;
            return grid(cnt % SRV_NODES + 1).cache(DEFAULT_CACHE_NAME);
        }
    };
    Ignite igniteSrv = ignite(0);
    IgniteCache<Object, Object> srvCache = igniteSrv.cache(DEFAULT_CACHE_NAME);
    List<Integer> keys = testKeys(srvCache, 3);
    int keyCnt = keys.size();
    for (int j = 0; j < 50; ++j) {
        ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
        final CacheEventListener3 lsnr = asyncCallback() ? new CacheEventAsyncListener3() : new CacheEventListener3();
        qry.setLocalListener(lsnr);
        qry.setRemoteFilter(lsnr);
        int keyIter = 0;
        for (; keyIter < keyCnt / 2; keyIter++) {
            int key = keys.get(keyIter);
            rndCache.apply().put(key, key);
        }
        assert lsnr.evts.isEmpty();
        QueryCursor<Cache.Entry<Object, Object>> qryCur = clnCache.query(qry);
        Map<Object, T2<Object, Object>> updates = new HashMap<>();
        final List<T3<Object, Object, Object>> expEvts = new ArrayList<>();
        Affinity<Object> aff = affinity(srvCache);
        boolean filtered = false;
        for (; keyIter < keys.size(); keyIter++) {
            int key = keys.get(keyIter);
            int val = filtered ? 1 : 2;
            log.info("Put [key=" + key + ", val=" + val + ", part=" + aff.partition(key) + ']');
            T2<Object, Object> t = updates.get(key);
            if (t == null) {
                // Check filtered.
                if (!filtered) {
                    updates.put(key, new T2<>((Object) val, null));
                    expEvts.add(new T3<>((Object) key, (Object) val, null));
                }
            } else {
                // Check filtered.
                if (!filtered) {
                    updates.put(key, new T2<>((Object) val, (Object) t.get1()));
                    expEvts.add(new T3<>((Object) key, (Object) val, (Object) t.get1()));
                }
            }
            rndCache.apply().put(key, val);
            filtered = !filtered;
        }
        checkEvents(expEvts, lsnr, false);
        qryCur.close();
    }
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) MutableEntry(javax.cache.processor.MutableEntry) ContinuousQuery(org.apache.ignite.cache.query.ContinuousQuery) Ignite(org.apache.ignite.Ignite) T2(org.apache.ignite.internal.util.typedef.T2) T3(org.apache.ignite.internal.util.typedef.T3) IgniteCache(org.apache.ignite.IgniteCache) IgniteOutClosure(org.apache.ignite.lang.IgniteOutClosure) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
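
rndCache above is an IgniteOutClosure that sends every put() to a different grid in turn. The sketch below shows the same rotation in isolation over plain strings, so no cluster is needed; only the IgniteOutClosure interface, a closure with a no-argument apply(), is taken from Ignite, and the class name is made up.

import java.util.Arrays;
import java.util.List;

import org.apache.ignite.lang.IgniteOutClosure;

public class RoundRobinClosureSketch {
    public static void main(String[] args) {
        // Stand-ins for the server caches the test rotates over.
        final List<String> targets = Arrays.asList("grid-1", "grid-2", "grid-3");

        // Same shape as rndCache above: each apply() call moves on to the next target.
        IgniteOutClosure<String> next = new IgniteOutClosure<String>() {
            int cnt;

            @Override public String apply() {
                ++cnt;

                return targets.get(cnt % targets.size());
            }
        };

        for (int i = 0; i < 6; i++)
            System.out.println("put #" + i + " goes to " + next.apply());
    }
}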

Example 15 with T2

Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class CacheContinuousQueryFactoryFilterRandomOperationTest, method doTestContinuousQuery.

/** {@inheritDoc} */
@Override
protected void doTestContinuousQuery(CacheConfiguration<Object, Object> ccfg, ContinuousDeploy deploy) throws Exception {
    ignite(0).createCache(ccfg);
    try {
        long seed = System.currentTimeMillis();
        Random rnd = new Random(seed);
        log.info("Random seed: " + seed);
        List<BlockingQueue<CacheEntryEvent<?, ?>>> evtsQueues = new ArrayList<>();
        Collection<QueryCursor<?>> curs = new ArrayList<>();
        Collection<T2<Integer, MutableCacheEntryListenerConfiguration>> lsnrCfgs = new ArrayList<>();
        if (deploy == CLIENT)
            evtsQueues.add(registerListener(ccfg.getName(), NODES - 1, curs, lsnrCfgs, rnd.nextBoolean()));
        else if (deploy == SERVER)
            evtsQueues.add(registerListener(ccfg.getName(), rnd.nextInt(NODES - 1), curs, lsnrCfgs, rnd.nextBoolean()));
        else {
            boolean isSync = rnd.nextBoolean();
            for (int i = 0; i < NODES - 1; i++)
                evtsQueues.add(registerListener(ccfg.getName(), i, curs, lsnrCfgs, isSync));
        }
        ConcurrentMap<Object, Object> expData = new ConcurrentHashMap<>();
        Map<Integer, Long> partCntr = new ConcurrentHashMap<>();
        try {
            for (int i = 0; i < ITERATION_CNT; i++) {
                if (i % 10 == 0)
                    log.info("Iteration: " + i);
                for (int idx = 0; idx < NODES; idx++)
                    randomUpdate(rnd, evtsQueues, expData, partCntr, grid(idx).cache(ccfg.getName()));
            }
        } finally {
            for (QueryCursor<?> cur : curs)
                cur.close();

            for (T2<Integer, MutableCacheEntryListenerConfiguration> e : lsnrCfgs)
                grid(e.get1()).cache(ccfg.getName()).deregisterCacheEntryListener(e.get2());
        }
    } finally {
        ignite(0).destroyCache(ccfg.getName());
    }
}
Also used : BlockingQueue(java.util.concurrent.BlockingQueue) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) MutableCacheEntryListenerConfiguration(javax.cache.configuration.MutableCacheEntryListenerConfiguration) ArrayList(java.util.ArrayList) Random(java.util.Random) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) QueryCursor(org.apache.ignite.cache.query.QueryCursor) T2(org.apache.ignite.internal.util.typedef.T2)
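
registerListener(...) is not shown on this page; the sketch below only illustrates the bookkeeping the finally block relies on: a JCache listener configuration is registered on a cache and remembered in a T2 together with a node index, because deregisterCacheEntryListener later needs the very same configuration instance. NoopListener, registerAndRemember and the other names here are made up for the sketch.

import java.io.Serializable;
import java.util.Collection;
import java.util.List;

import javax.cache.Cache;
import javax.cache.configuration.FactoryBuilder;
import javax.cache.configuration.MutableCacheEntryListenerConfiguration;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;

import org.apache.ignite.internal.util.typedef.T2;

public class ListenerCfgBookkeepingSketch {
    /** Minimal listener, present only so there is something to register. */
    public static class NoopListener implements CacheEntryUpdatedListener<Object, Object>, Serializable {
        /** {@inheritDoc} */
        @Override public void onUpdated(Iterable<CacheEntryEvent<?, ?>> evts) {
            // No-op.
        }
    }

    /** Registers a listener on the given cache and remembers the (node index, configuration) pair for later removal. */
    public static void registerAndRemember(
        int nodeIdx,
        Cache<Object, Object> cache,
        Collection<T2<Integer, MutableCacheEntryListenerConfiguration>> lsnrCfgs
    ) {
        MutableCacheEntryListenerConfiguration<Object, Object> cfg =
            new MutableCacheEntryListenerConfiguration<>(
                FactoryBuilder.factoryOf(NoopListener.class),
                null,    // No remote filter.
                true,    // Old value required.
                false);  // Asynchronous delivery.

        cache.registerCacheEntryListener(cfg);

        // Deregistration needs the exact same configuration instance, hence the T2 pair.
        lsnrCfgs.add(new T2<Integer, MutableCacheEntryListenerConfiguration>(nodeIdx, cfg));
    }

    /** Removes every remembered listener; the caches list stands in for the grid(idx).cache(name) lookups above. */
    public static void deregisterAll(
        Collection<T2<Integer, MutableCacheEntryListenerConfiguration>> lsnrCfgs,
        List<Cache<Object, Object>> caches
    ) {
        for (T2<Integer, MutableCacheEntryListenerConfiguration> e : lsnrCfgs)
            caches.get(e.get1()).deregisterCacheEntryListener(e.get2());
    }
}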

Aggregations

T2 (org.apache.ignite.internal.util.typedef.T2): 64 usages
ArrayList (java.util.ArrayList): 25 usages
HashMap (java.util.HashMap): 24 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 19 usages
Map (java.util.Map): 15 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 15 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 14 usages
UUID (java.util.UUID): 13 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 13 usages
Ignite (org.apache.ignite.Ignite): 13 usages
ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery): 12 usages
List (java.util.List): 11 usages
HashSet (java.util.HashSet): 10 usages
ConcurrentMap (java.util.concurrent.ConcurrentMap): 10 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 8 usages
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 7 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 7 usages
CacheException (javax.cache.CacheException): 7 usages
CacheEntryEvent (javax.cache.event.CacheEntryEvent): 7 usages
Set (java.util.Set): 5 usages