
Example 41 with ContinuousQuery

Use of org.apache.ignite.cache.query.ContinuousQuery in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method testFirstFilteredEvent.

/**
 * @throws Exception If failed.
 */
public void testFirstFilteredEvent() throws Exception {
    this.backups = 2;
    final int SRV_NODES = 4;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> qryClnCache = qryClient.cache(DEFAULT_CACHE_NAME);
    final CacheEventListener3 lsnr = new CacheEventListener3();
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    qry.setRemoteFilter(new CacheEventFilter());
    try (QueryCursor<?> cur = qryClnCache.query(qry)) {
        List<Integer> keys = testKeys(grid(0).cache(DEFAULT_CACHE_NAME), 1);
        for (Integer key : keys) qryClnCache.put(key, -1);
        qryClnCache.put(keys.get(0), 100);
        GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                return lsnr.evts.size() == 1;
            }
        }, 5000);
        assertEquals(1, lsnr.evts.size());
    }
}
Also used: GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery), Ignite (org.apache.ignite.Ignite)
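
For orientation, here is a minimal standalone sketch of the same remote-filter pattern with the test harness stripped away. It is illustrative only: the class name, the cache name "myCache" and the filter predicate are assumptions, not part of the test above; only the ContinuousQuery calls (setLocalListener, setRemoteFilter, IgniteCache.query) mirror the example. Note that recent Ignite versions deprecate setRemoteFilter in favor of setRemoteFilterFactory.

import javax.cache.Cache;
import javax.cache.event.CacheEntryEvent;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class ContinuousQueryFilterSketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("myCache");

            ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

            // Local listener: invoked on this node for every update that passed the remote filter.
            qry.setLocalListener(evts -> {
                for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts)
                    System.out.println("Filtered update: " + e.getKey() + " -> " + e.getValue());
            });

            // Remote filter: evaluated on the nodes where updates happen; only non-negative values pass.
            qry.setRemoteFilter(e -> e.getValue() != null && e.getValue() >= 0);

            try (QueryCursor<Cache.Entry<Integer, Integer>> cur = cache.query(qry)) {
                cache.put(1, -1);  // rejected by the remote filter
                cache.put(1, 100); // delivered to the local listener

                Thread.sleep(1_000); // give the asynchronous callback a moment to fire before shutdown
            }
        }
    }
}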

Example 42 with ContinuousQuery

Use of org.apache.ignite.cache.query.ContinuousQuery in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method checkBackupQueue.

/**
 * @param backups Number of backups.
 * @param updateFromClient If {@code true} executes cache update from client node.
 * @throws Exception If failed.
 */
private void checkBackupQueue(int backups, boolean updateFromClient) throws Exception {
    this.backups = atomicityMode() == CacheAtomicityMode.ATOMIC ? backups : backups < 2 ? 2 : backups;
    final int SRV_NODES = 4;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> qryClientCache = qryClient.cache(DEFAULT_CACHE_NAME);
    Affinity<Object> aff = qryClient.affinity(DEFAULT_CACHE_NAME);
    CacheEventListener1 lsnr = asyncCallback() ? new CacheEventAsyncListener1(false) : new CacheEventListener1(false);
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    QueryCursor<?> cur = qryClientCache.query(qry);
    int PARTS = 10;
    Map<Object, T2<Object, Object>> updates = new HashMap<>();
    List<T3<Object, Object, Object>> expEvts = new ArrayList<>();
    for (int i = 0; i < (atomicityMode() == CacheAtomicityMode.ATOMIC ? SRV_NODES - 1 : SRV_NODES - 2); i++) {
        log.info("Stop iteration: " + i);
        TestCommunicationSpi spi = (TestCommunicationSpi) ignite(i).configuration().getCommunicationSpi();
        Ignite ignite = ignite(i);
        IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);
        List<Integer> keys = testKeys(cache, PARTS);
        CountDownLatch latch = new CountDownLatch(keys.size());
        lsnr.latch = latch;
        boolean first = true;
        for (Integer key : keys) {
            log.info("Put [node=" + ignite.name() + ", key=" + key + ", part=" + aff.partition(key) + ']');
            T2<Object, Object> t = updates.get(key);
            if (updateFromClient) {
                if (atomicityMode() == CacheAtomicityMode.TRANSACTIONAL) {
                    try (Transaction tx = qryClient.transactions().txStart()) {
                        qryClientCache.put(key, key);
                        tx.commit();
                    } catch (CacheException | ClusterTopologyException ignored) {
                        log.warning("Failed put. [Key=" + key + ", val=" + key + "]");
                        continue;
                    }
                } else
                    qryClientCache.put(key, key);
            } else {
                if (atomicityMode() == CacheAtomicityMode.TRANSACTIONAL) {
                    try (Transaction tx = ignite.transactions().txStart()) {
                        cache.put(key, key);
                        tx.commit();
                    } catch (CacheException | ClusterTopologyException ignored) {
                        log.warning("Failed put. [Key=" + key + ", val=" + key + "]");
                        continue;
                    }
                } else
                    cache.put(key, key);
            }
            if (t == null) {
                updates.put(key, new T2<>((Object) key, null));
                expEvts.add(new T3<>((Object) key, (Object) key, null));
            } else {
                updates.put(key, new T2<>((Object) key, (Object) key));
                expEvts.add(new T3<>((Object) key, (Object) key, (Object) key));
            }
            if (first) {
                spi.skipMsg = true;
                first = false;
            }
        }
        stopGrid(i);
        if (!latch.await(5, SECONDS)) {
            Set<Integer> keys0 = new HashSet<>(keys);
            keys0.removeAll(lsnr.keys);
            log.info("Missed events for keys: " + keys0);
            fail("Failed to wait for notifications [exp=" + keys.size() + ", left=" + lsnr.latch.getCount() + ']');
        }
        checkEvents(expEvts, lsnr);
    }
    for (int i = 0; i < (atomicityMode() == CacheAtomicityMode.ATOMIC ? SRV_NODES - 1 : SRV_NODES - 2); i++) {
        log.info("Start iteration: " + i);
        Ignite ignite = startGrid(i);
        IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);
        List<Integer> keys = testKeys(cache, PARTS);
        CountDownLatch latch = new CountDownLatch(keys.size());
        lsnr.latch = latch;
        for (Integer key : keys) {
            log.info("Put [node=" + ignite.name() + ", key=" + key + ", part=" + aff.partition(key) + ']');
            T2<Object, Object> t = updates.get(key);
            if (t == null) {
                updates.put(key, new T2<>((Object) key, null));
                expEvts.add(new T3<>((Object) key, (Object) key, null));
            } else {
                updates.put(key, new T2<>((Object) key, (Object) key));
                expEvts.add(new T3<>((Object) key, (Object) key, (Object) key));
            }
            if (updateFromClient)
                qryClientCache.put(key, key);
            else
                cache.put(key, key);
        }
        if (!latch.await(10, SECONDS)) {
            Set<Integer> keys0 = new HashSet<>(keys);
            keys0.removeAll(lsnr.keys);
            log.info("Missed events for keys: " + keys0);
            fail("Failed to wait for notifications [exp=" + keys.size() + ", left=" + lsnr.latch.getCount() + ']');
        }
        checkEvents(expEvts, lsnr);
    }
    cur.close();
    assertFalse("Unexpected error during test, see log for details.", err);
}
Also used: ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), CacheException (javax.cache.CacheException), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery), Ignite (org.apache.ignite.Ignite), T2 (org.apache.ignite.internal.util.typedef.T2), T3 (org.apache.ignite.internal.util.typedef.T3), HashSet (java.util.HashSet), GridConcurrentHashSet (org.apache.ignite.internal.util.GridConcurrentHashSet), CountDownLatch (java.util.concurrent.CountDownLatch), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Transaction (org.apache.ignite.transactions.Transaction), ClusterTopologyException (org.apache.ignite.cluster.ClusterTopologyException)
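
The latch-based waiting used throughout checkBackupQueue can be isolated into a small listener of its own. The sketch below is a hypothetical simplification of CacheEventListener1, not the actual test class: the test thread sizes the latch to the expected number of updates, performs the puts, and then awaits the latch.

import java.util.concurrent.CountDownLatch;

import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;

/** Hypothetical listener: counts the latch down by one for every received event. */
class LatchListener implements CacheEntryUpdatedListener<Integer, Integer> {
    final CountDownLatch latch;

    LatchListener(int expEvts) {
        latch = new CountDownLatch(expEvts);
    }

    @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
        for (CacheEntryEvent<? extends Integer, ? extends Integer> ignored : evts)
            latch.countDown();
    }
}

After registering it with qry.setLocalListener(new LatchListener(keys.size())) and performing the puts, awaiting the listener's latch with a timeout gives the same pass/fail signal as the fail(...) branch in the test above.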

Example 43 with ContinuousQuery

Use of org.apache.ignite.cache.query.ContinuousQuery in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method testStartStopQuery.

/**
 * @throws Exception If failed.
 */
public void testStartStopQuery() throws Exception {
    this.backups = 1;
    final int SRV_NODES = 3;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    final Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> clnCache = qryClient.cache(DEFAULT_CACHE_NAME);
    IgniteOutClosure<IgniteCache<Integer, Integer>> rndCache = new IgniteOutClosure<IgniteCache<Integer, Integer>>() {

        int cnt = 0;

        @Override
        public IgniteCache<Integer, Integer> apply() {
            ++cnt;
            return grid(cnt % SRV_NODES + 1).cache(DEFAULT_CACHE_NAME);
        }
    };
    Ignite igniteSrv = ignite(0);
    IgniteCache<Object, Object> srvCache = igniteSrv.cache(DEFAULT_CACHE_NAME);
    List<Integer> keys = testKeys(srvCache, 3);
    int keyCnt = keys.size();
    for (int j = 0; j < 50; ++j) {
        ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
        final CacheEventListener3 lsnr = asyncCallback() ? new CacheEventAsyncListener3() : new CacheEventListener3();
        qry.setLocalListener(lsnr);
        qry.setRemoteFilter(lsnr);
        int keyIter = 0;
        for (; keyIter < keyCnt / 2; keyIter++) {
            int key = keys.get(keyIter);
            rndCache.apply().put(key, key);
        }
        assert lsnr.evts.isEmpty();
        QueryCursor<Cache.Entry<Object, Object>> qryCur = clnCache.query(qry);
        Map<Object, T2<Object, Object>> updates = new HashMap<>();
        final List<T3<Object, Object, Object>> expEvts = new ArrayList<>();
        Affinity<Object> aff = affinity(srvCache);
        boolean filtered = false;
        for (; keyIter < keys.size(); keyIter++) {
            int key = keys.get(keyIter);
            int val = filtered ? 1 : 2;
            log.info("Put [key=" + key + ", val=" + val + ", part=" + aff.partition(key) + ']');
            T2<Object, Object> t = updates.get(key);
            if (t == null) {
                // Check filtered.
                if (!filtered) {
                    updates.put(key, new T2<>((Object) val, null));
                    expEvts.add(new T3<>((Object) key, (Object) val, null));
                }
            } else {
                // Check filtered.
                if (!filtered) {
                    updates.put(key, new T2<>((Object) val, (Object) t.get1()));
                    expEvts.add(new T3<>((Object) key, (Object) val, (Object) t.get1()));
                }
            }
            rndCache.apply().put(key, val);
            filtered = !filtered;
        }
        checkEvents(expEvts, lsnr, false);
        qryCur.close();
    }
}
Also used: ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), MutableEntry (javax.cache.processor.MutableEntry), ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery), Ignite (org.apache.ignite.Ignite), T2 (org.apache.ignite.internal.util.typedef.T2), T3 (org.apache.ignite.internal.util.typedef.T3), IgniteCache (org.apache.ignite.IgniteCache), IgniteOutClosure (org.apache.ignite.lang.IgniteOutClosure), AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
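
Because the loop above repeatedly opens and closes cursors, the lifecycle is worth spelling out on its own. The sketch below is a standalone illustration under assumed names (class StartStopQuerySketch, cache "myCache"); it relies only on the documented behavior that a continuous query delivers events from the moment query() returns until the cursor is closed.

import javax.cache.Cache;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class StartStopQuerySketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("myCache");

            for (int i = 0; i < 3; i++) {
                ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

                qry.setLocalListener(evts -> evts.forEach(e ->
                    System.out.println("Update: " + e.getKey() + " -> " + e.getValue())));

                cache.put(i, -1); // not delivered: the query is not registered yet

                try (QueryCursor<Cache.Entry<Integer, Integer>> cur = cache.query(qry)) {
                    cache.put(i, i); // delivered to the local listener

                    Thread.sleep(500); // allow the asynchronous callback to run before the cursor closes
                }
                // Closing the cursor deregisters the listener; later updates raise no events.
            }
        }
    }
}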

Example 44 with ContinuousQuery

Use of org.apache.ignite.cache.query.ContinuousQuery in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method failoverStartStopFilter.

/**
 * @param backups Number of backups.
 * @throws Exception If failed.
 */
private void failoverStartStopFilter(int backups) throws Exception {
    this.backups = backups;
    final int SRV_NODES = 4;
    startGridsMultiThreaded(SRV_NODES);
    client = true;
    Ignite qryClient = startGrid(SRV_NODES);
    client = false;
    IgniteCache<Object, Object> qryClnCache = qryClient.cache(DEFAULT_CACHE_NAME);
    final CacheEventListener2 lsnr = new CacheEventListener2();
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    qry.setRemoteFilter(asyncCallback() ? new CacheEventAsyncFilter() : new CacheEventFilter());
    QueryCursor<?> cur = qryClnCache.query(qry);
    CacheEventListener2 dinLsnr = null;
    QueryCursor<?> dinQry = null;
    final AtomicBoolean stop = new AtomicBoolean();
    final AtomicReference<CountDownLatch> checkLatch = new AtomicReference<>();
    IgniteInternalFuture<?> restartFut = GridTestUtils.runAsync(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            while (!stop.get() && !err) {
                final int idx = ThreadLocalRandom.current().nextInt(SRV_NODES - 1);
                log.info("Stop node: " + idx);
                awaitPartitionMapExchange();
                Thread.sleep(400);
                stopGrid(idx);
                awaitPartitionMapExchange();
                Thread.sleep(400);
                log.info("Start node: " + idx);
                startGrid(idx);
                Thread.sleep(200);
                CountDownLatch latch = new CountDownLatch(1);
                assertTrue(checkLatch.compareAndSet(null, latch));
                if (!stop.get()) {
                    log.info("Wait for event check.");
                    assertTrue(latch.await(1, MINUTES));
                }
            }
            return null;
        }
    });
    final Map<Integer, Integer> vals = new HashMap<>();
    final Map<Integer, List<T2<Integer, Integer>>> expEvts = new HashMap<>();
    final List<T3<Object, Object, Object>> expEvtsNewLsnr = new ArrayList<>();
    final List<T3<Object, Object, Object>> expEvtsLsnr = new ArrayList<>();
    try {
        long stopTime = System.currentTimeMillis() + 60_000;
        // Start new filter each 5 sec.
        long startFilterTime = System.currentTimeMillis() + 5_000;
        final int PARTS = qryClient.affinity(DEFAULT_CACHE_NAME).partitions();
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        boolean filtered = false;
        boolean processorPut = false;
        while (System.currentTimeMillis() < stopTime) {
            Integer key = rnd.nextInt(PARTS);
            Integer prevVal = vals.get(key);
            Integer val = vals.get(key);
            if (System.currentTimeMillis() > startFilterTime) {
                // Stop filter and check events.
                if (dinQry != null) {
                    dinQry.close();
                    log.info("Continuous query listener closed. Await events: " + expEvtsNewLsnr.size());
                    checkEvents(expEvtsNewLsnr, dinLsnr, backups == 0);
                }
                dinLsnr = new CacheEventListener2();
                ContinuousQuery<Object, Object> newQry = new ContinuousQuery<>();
                newQry.setLocalListener(dinLsnr);
                newQry.setRemoteFilter(asyncCallback() ? new CacheEventAsyncFilter() : new CacheEventFilter());
                dinQry = qryClnCache.query(newQry);
                log.info("Continuous query listener started.");
                startFilterTime = System.currentTimeMillis() + 5_000;
            }
            if (val == null)
                val = 0;
            else
                val = Math.abs(val) + 1;
            if (filtered)
                val = -val;
            if (processorPut && prevVal != null) {
                qryClnCache.invoke(key, new CacheEntryProcessor<Object, Object, Void>() {

                    @Override
                    public Void process(MutableEntry<Object, Object> entry, Object... arguments) throws EntryProcessorException {
                        entry.setValue(arguments[0]);
                        return null;
                    }
                }, val);
            } else
                qryClnCache.put(key, val);
            processorPut = !processorPut;
            vals.put(key, val);
            if (val >= 0) {
                List<T2<Integer, Integer>> keyEvts = expEvts.get(key);
                if (keyEvts == null) {
                    keyEvts = new ArrayList<>();
                    expEvts.put(key, keyEvts);
                }
                keyEvts.add(new T2<>(val, prevVal));
                T3<Object, Object, Object> tupVal = new T3<>((Object) key, (Object) val, (Object) prevVal);
                expEvtsLsnr.add(tupVal);
                if (dinQry != null)
                    expEvtsNewLsnr.add(tupVal);
            }
            filtered = !filtered;
            CountDownLatch latch = checkLatch.get();
            if (latch != null) {
                log.info("Check events.");
                checkLatch.set(null);
                boolean success = false;
                try {
                    if (err)
                        break;
                    checkEvents(expEvtsLsnr, lsnr, backups == 0);
                    success = true;
                    log.info("Events checked.");
                } finally {
                    if (!success)
                        err = true;
                    latch.countDown();
                }
            }
        }
    } finally {
        stop.set(true);
    }
    CountDownLatch latch = checkLatch.get();
    if (latch != null)
        latch.countDown();
    restartFut.get();
    checkEvents(expEvtsLsnr, lsnr, backups == 0);
    lsnr.evts.clear();
    lsnr.vals.clear();
    if (dinQry != null) {
        checkEvents(expEvtsNewLsnr, dinLsnr, backups == 0);
        dinLsnr.evts.clear();
        dinLsnr.vals.clear();
    }
    List<T3<Object, Object, Object>> afterRestEvts = new ArrayList<>();
    for (int i = 0; i < qryClient.affinity(DEFAULT_CACHE_NAME).partitions(); i++) {
        Integer oldVal = (Integer) qryClnCache.get(i);
        qryClnCache.put(i, i);
        afterRestEvts.add(new T3<>((Object) i, (Object) i, (Object) oldVal));
    }
    checkEvents(new ArrayList<>(afterRestEvts), lsnr, false);
    cur.close();
    if (dinQry != null) {
        checkEvents(new ArrayList<>(afterRestEvts), dinLsnr, false);
        dinQry.close();
    }
    assertFalse("Unexpected error during test, see log for details.", err);
}
Also used: ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery), EntryProcessorException (javax.cache.processor.EntryProcessorException), ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom), Ignite (org.apache.ignite.Ignite), List (java.util.List), T2 (org.apache.ignite.internal.util.typedef.T2), T3 (org.apache.ignite.internal.util.typedef.T3), AtomicReference (java.util.concurrent.atomic.AtomicReference), CountDownLatch (java.util.concurrent.CountDownLatch), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteException (org.apache.ignite.IgniteException), CacheEntryListenerException (javax.cache.event.CacheEntryListenerException), IgniteSpiException (org.apache.ignite.spi.IgniteSpiException), ClusterTopologyException (org.apache.ignite.cluster.ClusterTopologyException), IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException), CacheException (javax.cache.CacheException), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)
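
The example also drives some updates through IgniteCache.invoke rather than put, and the detail worth highlighting is that entry-processor updates feed the continuous query pipeline exactly like plain puts. The processor below is a hypothetical named version of the anonymous CacheEntryProcessor in the code above (the class name and generics are assumptions).

import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

import org.apache.ignite.cache.CacheEntryProcessor;

/** Hypothetical processor: overwrites the entry with the supplied argument. */
class SetValueProcessor implements CacheEntryProcessor<Object, Object, Void> {
    @Override public Void process(MutableEntry<Object, Object> entry, Object... args) throws EntryProcessorException {
        entry.setValue(args[0]);

        return null;
    }
}

Invoked as cache.invoke(key, new SetValueProcessor(), val), the resulting update is observed by registered local listeners and remote filters the same way qryClnCache.put(key, val) would be.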

Example 45 with ContinuousQuery

Use of org.apache.ignite.cache.query.ContinuousQuery in project ignite by apache.

From the class CacheContinuousQueryFailoverAbstractSelfTest, method testBackupQueueEvict.

/**
 * @throws Exception If failed.
 */
public void testBackupQueueEvict() throws Exception {
    startGridsMultiThreaded(2);
    client = true;
    Ignite qryClient = startGrid(2);
    CacheEventListener1 lsnr = new CacheEventListener1(false);
    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    QueryCursor<?> cur = qryClient.cache(DEFAULT_CACHE_NAME).query(qry);
    assertEquals(0, backupQueue(ignite(0)).size());
    long ttl = 100;
    final ExpiryPolicy expiry = new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl));
    final IgniteCache<Object, Object> cache0 = ignite(2).cache(DEFAULT_CACHE_NAME).withExpiryPolicy(expiry);
    final List<Integer> keys = primaryKeys(ignite(1).cache(DEFAULT_CACHE_NAME), BACKUP_ACK_THRESHOLD);
    lsnr.latch = new CountDownLatch(keys.size());
    for (Integer key : keys) {
        log.info("Put: " + key);
        cache0.put(key, key);
    }
    GridTestUtils.waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            return backupQueue(ignite(0)).isEmpty();
        }
    }, 5000);
    assertTrue("Backup queue is not cleared: " + backupQueue(ignite(0)), backupQueue(ignite(0)).size() < BACKUP_ACK_THRESHOLD);
    boolean wait = waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            return cache0.localPeek(keys.get(0)) == null;
        }
    }, ttl + 1000);
    assertTrue("Entry evicted.", wait);
    GridTestUtils.waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            return backupQueue(ignite(0)).isEmpty();
        }
    }, 2000);
    assertTrue("Backup queue is not cleared: " + backupQueue(ignite(0)), backupQueue(ignite(0)).size() < BACKUP_ACK_THRESHOLD);
    if (backupQueue(ignite(0)).size() != 0) {
        for (Object o : backupQueue(ignite(0))) {
            CacheContinuousQueryEntry e = (CacheContinuousQueryEntry) o;
            assertNotSame("Evicted entry added to backup queue.", -1L, e.updateCounter());
        }
    }
    cur.close();
}
Also used: GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate), Duration (javax.cache.expiry.Duration), CountDownLatch (java.util.concurrent.CountDownLatch), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery), TouchedExpiryPolicy (javax.cache.expiry.TouchedExpiryPolicy), ExpiryPolicy (javax.cache.expiry.ExpiryPolicy), Ignite (org.apache.ignite.Ignite)
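
testBackupQueueEvict combines continuous queries with per-operation expiry. The helper below is a hypothetical sketch (the class and method names are not from the test) of the withExpiryPolicy decoration it uses: only puts made through the returned view get the TTL, while the underlying cache and its other views are unaffected; the assertions above then verify that expired entries do not linger in the backup queue.

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import javax.cache.expiry.Duration;
import javax.cache.expiry.TouchedExpiryPolicy;

import org.apache.ignite.IgniteCache;

/** Hypothetical helper: puts a value that expires ttlMs milliseconds after it was last touched. */
final class ExpiringPuts {
    static void putWithTtl(IgniteCache<Object, Object> cache, Object key, Object val, long ttlMs) {
        IgniteCache<Object, Object> expiring =
            cache.withExpiryPolicy(new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttlMs)));

        expiring.put(key, val);
    }
}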

Aggregations

ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery): 87 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 50 usages
Ignite (org.apache.ignite.Ignite): 43 usages
CacheEntryEvent (javax.cache.event.CacheEntryEvent): 42 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 38 usages
ArrayList (java.util.ArrayList): 27 usages
QueryCursor (org.apache.ignite.cache.query.QueryCursor): 23 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 22 usages
CacheEntryListenerException (javax.cache.event.CacheEntryListenerException): 19 usages
IgniteCache (org.apache.ignite.IgniteCache): 19 usages
IgniteException (org.apache.ignite.IgniteException): 17 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 15 usages
HashMap (java.util.HashMap): 14 usages
CacheEntryUpdatedListener (javax.cache.event.CacheEntryUpdatedListener): 14 usages
PA (org.apache.ignite.internal.util.typedef.PA): 13 usages
T2 (org.apache.ignite.internal.util.typedef.T2): 13 usages
List (java.util.List): 12 usages
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 10 usages
ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom): 10 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9 usages