Usage of org.apache.ignite.internal.util.typedef.PA in the Apache Ignite project.
From class CacheContinuousQueryFailoverAbstractSelfTest, method testLeftPrimaryAndBackupNodes.
/**
 * Checks that a continuous query survives the simultaneous stop of all nodes
 * (primary and backups) owning a test partition: events generated both before
 * and after the failover must be delivered to the client-side listener.
 *
 * @throws Exception If failed.
 */
public void testLeftPrimaryAndBackupNodes() throws Exception {
    // The scenario needs a partitioned cache: in REPLICATED mode every node owns the partition.
    if (cacheMode() == REPLICATED)
        return;

    this.backups = 1;

    final int SRV_NODES = 3;

    startGridsMultiThreaded(SRV_NODES);

    client = true;

    final Ignite qryClient = startGrid(SRV_NODES);

    client = false;

    ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();

    final CacheEventListener3 lsnr = asyncCallback() ? new CacheEventAsyncListener3() : new CacheEventListener3();

    qry.setLocalListener(lsnr);

    qry.setRemoteFilter(lsnr);

    IgniteCache<Object, Object> clnCache = qryClient.cache(DEFAULT_CACHE_NAME);

    QueryCursor<Cache.Entry<Object, Object>> qryCur = clnCache.query(qry);

    Ignite igniteSrv = ignite(0);

    IgniteCache<Object, Object> srvCache = igniteSrv.cache(DEFAULT_CACHE_NAME);

    Affinity<Object> aff = affinity(srvCache);

    List<Integer> keys = testKeys(srvCache, 1);

    // All nodes (primary + backups) that own the partition of the first test key.
    Collection<ClusterNode> nodes = aff.mapPartitionToPrimaryAndBackups(keys.get(0));

    Collection<UUID> ids = F.transform(nodes, new C1<ClusterNode, UUID>() {
        @Override public UUID apply(ClusterNode node) {
            return node.id();
        }
    });

    int keyIter = 0;

    // Alternates on every update; "filtered" updates use values the remote filter drops,
    // so no event is expected for them.
    boolean filtered = false;

    Map<Object, T2<Object, Object>> updates = new HashMap<>();

    final List<T3<Object, Object, Object>> expEvts = new ArrayList<>();

    // Update the first half of the keys while the full topology is alive.
    for (; keyIter < keys.size() / 2; keyIter++) {
        int key = keys.get(keyIter);

        log.info("Put [key=" + key + ", part=" + aff.partition(key) + ", filtered=" + filtered + ']');

        T2<Object, Object> t = updates.get(key);

        // Filtered iterations produce a value the remote filter rejects; others use key * 2.
        Integer val = filtered ? (key % 2 == 0 ? key + 1 : key) : key * 2;

        if (t == null) {
            updates.put(key, new T2<>((Object)val, null));

            if (!filtered)
                expEvts.add(new T3<>((Object)key, (Object)val, null));
        }
        else {
            updates.put(key, new T2<>((Object)val, (Object)key));

            if (!filtered)
                expEvts.add(new T3<>((Object)key, (Object)val, (Object)key));
        }

        srvCache.put(key, val);

        filtered = !filtered;
    }

    checkEvents(expEvts, lsnr, false);

    List<Thread> stopThreads = new ArrayList<>(3);

    // Stop nodes which own this partition, first making them drop all outgoing
    // messages to emulate an abrupt failure rather than a graceful shutdown.
    for (int i = 0; i < SRV_NODES; i++) {
        Ignite ignite = ignite(i);

        if (ids.contains(ignite.cluster().localNode().id())) {
            final int i0 = i;

            TestCommunicationSpi spi = (TestCommunicationSpi)ignite.configuration().getCommunicationSpi();

            spi.skipAllMsg = true;

            stopThreads.add(new Thread() {
                @Override public void run() {
                    stopGrid(i0, true);
                }
            });
        }
    }

    // Stop and join threads.
    for (Thread t : stopThreads)
        t.start();

    for (Thread t : stopThreads)
        t.join();

    // Use assertTrue instead of the bare 'assert' keyword so the check is not
    // silently skipped when the JVM runs without -ea.
    assertTrue(GridTestUtils.waitForCondition(new PA() {
        @Override public boolean apply() {
            // (SRV_NODES + 1 client node) - 1 primary - backup nodes.
            return qryClient.cluster().nodes().size() == (SRV_NODES + 1) - 1 /* Primary node */ - backups;
        }
    }, 5000L));

    // Update the remaining keys through the client after the failover.
    for (; keyIter < keys.size(); keyIter++) {
        int key = keys.get(keyIter);

        log.info("Put [key=" + key + ", filtered=" + filtered + ']');

        T2<Object, Object> t = updates.get(key);

        Integer val = filtered ? (key % 2 == 0 ? key + 1 : key) : key * 2;

        if (t == null) {
            updates.put(key, new T2<>((Object)val, null));

            if (!filtered)
                expEvts.add(new T3<>((Object)key, (Object)val, null));
        }
        else {
            updates.put(key, new T2<>((Object)val, (Object)key));

            if (!filtered)
                expEvts.add(new T3<>((Object)key, (Object)val, (Object)key));
        }

        clnCache.put(key, val);

        filtered = !filtered;
    }

    checkEvents(expEvts, lsnr, false);

    qryCur.close();
}
Usage of org.apache.ignite.internal.util.typedef.PA in the Apache Ignite project.
From class CacheContinuousQueryFailoverAbstractSelfTest, method checkEvents.
/**
 * Matches the events actually received by the listener against the expected ones,
 * tolerating lost events up to a threshold (or entirely when {@code lostAllow} is set)
 * and failing loudly on duplicates. Clears both the expected list and the listener
 * state before returning.
 *
 * @param expEvts Expected events.
 * @param lsnr Listener.
 * @param lostAllow If {@code true} then won't assert on lost events.
 * @param wait Wait flag.
 * @throws Exception If failed.
 */
private void checkEvents(final List<T3<Object, Object, Object>> expEvts, final CacheEventListener2 lsnr, boolean lostAllow, boolean wait) throws Exception {
    if (wait) {
        // Best-effort wait for the listener to accumulate all expected events;
        // result is intentionally ignored — mismatches are analyzed below.
        GridTestUtils.waitForCondition(new PA() {
            @Override
            public boolean apply() {
                return expEvts.size() == lsnr.size();
            }
        }, 10_000L);
    }

    // Lock the listener so its event maps do not change while we compare.
    synchronized (lsnr) {
        // Snapshot of received events per key (kept for reference; lsnr.evts is mutated below).
        Map<Integer, List<CacheEntryEvent<?, ?>>> prevMap = new HashMap<>(lsnr.evts.size());

        for (Map.Entry<Integer, List<CacheEntryEvent<?, ?>>> e : lsnr.evts.entrySet()) prevMap.put(e.getKey(), new ArrayList<>(e.getValue()));

        List<T3<Object, Object, Object>> lostEvts = new ArrayList<>();

        // For every expected event, try to find and consume a matching received event.
        for (T3<Object, Object, Object> exp : expEvts) {
            List<CacheEntryEvent<?, ?>> rcvdEvts = lsnr.evts.get(exp.get1());

            // No-op update (new value equals old value) produces no event — skip.
            if (F.eq(exp.get2(), exp.get3()))
                continue;

            if (rcvdEvts == null || rcvdEvts.isEmpty()) {
                lostEvts.add(exp);

                continue;
            }

            Iterator<CacheEntryEvent<?, ?>> iter = rcvdEvts.iterator();

            boolean found = false;

            // Remove the matched event via the iterator so leftovers indicate duplicates.
            while (iter.hasNext()) {
                CacheEntryEvent<?, ?> e = iter.next();

                if ((exp.get2() != null && e.getValue() != null && exp.get2().equals(e.getValue())) && equalOldValue(e, exp)) {
                    found = true;

                    iter.remove();

                    break;
                }
            }

            // Lost event is acceptable.
            if (!found)
                lostEvts.add(exp);
        }

        boolean dup = false;

        // Check duplicate: any event still left in lsnr.evts was either a re-delivery
        // of a "lost" expected event (acceptable) or a true duplicate.
        if (!lsnr.evts.isEmpty()) {
            for (List<CacheEntryEvent<?, ?>> evts : lsnr.evts.values()) {
                if (!evts.isEmpty()) {
                    for (CacheEntryEvent<?, ?> e : evts) {
                        boolean found = false;

                        // Leftover that matches a lost event is a late delivery, not a duplicate;
                        // remove-then-break keeps the iteration safe.
                        for (T3<Object, Object, Object> lostEvt : lostEvts) {
                            if (e.getKey().equals(lostEvt.get1()) && e.getValue().equals(lostEvt.get2())) {
                                found = true;

                                lostEvts.remove(lostEvt);

                                break;
                            }
                        }

                        if (!found) {
                            dup = true;

                            break;
                        }
                    }
                }
            }

            if (dup) {
                // Dump every remaining event to aid debugging before the assertions below.
                for (List<CacheEntryEvent<?, ?>> e : lsnr.evts.values()) {
                    if (!e.isEmpty()) {
                        for (CacheEntryEvent<?, ?> event : e) log.error("Got duplicate event: " + event);
                    }
                }
            }
        }

        // A small number of lost events is tolerated even when lostAllow is false.
        if (!lostAllow && lostEvts.size() > 100) {
            log.error("Lost event cnt: " + lostEvts.size());

            for (T3<Object, Object, Object> e : lostEvts) log.error("Lost event: " + e);

            fail("Lose events, see log for details.");
        }

        log.error("Lost event cnt: " + lostEvts.size());

        // Reset state so the caller can reuse the same listener for the next phase.
        expEvts.clear();

        lsnr.evts.clear();

        lsnr.vals.clear();
    }
}
Usage of org.apache.ignite.internal.util.typedef.PA in the Apache Ignite project.
From class CacheContinuousQueryCounterAbstractTest, method testTwoQueryListener.
/**
 * Checks that two continuous queries registered on different nodes both receive
 * every update, and that each received event carries the correct partition
 * update counter.
 *
 * @throws Exception If failed.
 */
public void testTwoQueryListener() throws Exception {
    if (cacheMode() == LOCAL)
        return;

    final IgniteCache<Integer, Integer> cache = grid(0).cache(CACHE_NAME);
    final IgniteCache<Integer, Integer> cache1 = grid(1).cache(CACHE_NAME);

    final AtomicInteger cntr = new AtomicInteger(0);
    final AtomicInteger cntr1 = new AtomicInteger(0);

    final ContinuousQuery<Integer, Integer> qry1 = new ContinuousQuery<>();
    final ContinuousQuery<Integer, Integer> qry2 = new ContinuousQuery<>();

    // Per-key lists of (value, partition update counter) pairs, one map per query.
    final Map<Integer, List<T2<Integer, Long>>> map1 = new HashMap<>();
    final Map<Integer, List<T2<Integer, Long>>> map2 = new HashMap<>();

    qry1.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts) {
                cntr.incrementAndGet();

                // Listener may be invoked concurrently; guard the shared map.
                synchronized (map1) {
                    List<T2<Integer, Long>> vals = map1.get(e.getKey());

                    if (vals == null) {
                        vals = new ArrayList<>();

                        map1.put(e.getKey(), vals);
                    }

                    vals.add(new T2<>(e.getValue(), e.unwrap(CacheQueryEntryEvent.class).getPartitionUpdateCounter()));
                }
            }
        }
    });

    qry2.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts) {
                cntr1.incrementAndGet();

                synchronized (map2) {
                    List<T2<Integer, Long>> vals = map2.get(e.getKey());

                    if (vals == null) {
                        vals = new ArrayList<>();

                        map2.put(e.getKey(), vals);
                    }

                    vals.add(new T2<>(e.getValue(), e.unwrap(CacheQueryEntryEvent.class).getPartitionUpdateCounter()));
                }
            }
        }
    });

    try (QueryCursor<Cache.Entry<Integer, Integer>> query2 = cache1.query(qry2);
         QueryCursor<Cache.Entry<Integer, Integer>> query1 = cache.query(qry1)) {
        // Drive updates from every node in turn; each iteration does 6 operations.
        for (int i = 0; i < gridCount(); i++) {
            IgniteCache<Object, Object> cache0 = grid(i).cache(CACHE_NAME);

            cache0.put(1, 1);
            cache0.put(2, 2);
            cache0.put(3, 3);

            cache0.remove(1);
            cache0.remove(2);
            cache0.remove(3);

            final int iter = i + 1;

            // Use assertTrue instead of the bare 'assert' keyword so the check is not
            // silently skipped when the JVM runs without -ea.
            assertTrue(GridTestUtils.waitForCondition(new PA() {
                @Override public boolean apply() {
                    return iter * 6 * /* count operation */
                        2 == /* count continues queries*/
                        (cntr.get() + cntr1.get());
                }
            }, 5000L));

            checkEvents(map1, i);

            map1.clear();

            checkEvents(map2, i);

            map2.clear();
        }
    }
}
Usage of org.apache.ignite.internal.util.typedef.PA in the Apache Ignite project.
From class CacheContinuousQueryCounterAbstractTest, method testRestartQuery.
/**
 * Repeatedly registers and closes a continuous query on the same key, verifying
 * after each registration that the received values and partition update counters
 * stay consistent (each event's counter is value + 1).
 *
 * @throws Exception If failed.
 */
public void testRestartQuery() throws Exception {
    IgniteCache<Integer, Integer> cache = grid(0).cache(CACHE_NAME);

    final int keyCnt = 300;
    final int updateKey = 1;

    // Warm up the partition counter before any query is registered.
    for (int i = 0; i < keyCnt; i++) cache.put(updateKey, i);

    for (int i = 0; i < 10; i++) {
        // Even iterations run with a query registered; odd iterations update without one.
        if (i % 2 == 0) {
            final AtomicInteger cntr = new AtomicInteger(0);

            ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

            final List<T2<Integer, Long>> vals = new ArrayList<>();

            qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
                @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
                    for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts) {
                        // Guard the shared list: listener may fire concurrently with the check below.
                        synchronized (vals) {
                            cntr.incrementAndGet();

                            vals.add(new T2<>(e.getValue(), e.unwrap(CacheQueryEntryEvent.class).getPartitionUpdateCounter()));
                        }
                    }
                }
            });

            try (QueryCursor<Cache.Entry<Integer, Integer>> ignore = cache.query(qry)) {
                for (int key = 0; key < keyCnt; key++) cache.put(updateKey, cache.get(updateKey) + 1);

                // Use assertTrue instead of the bare 'assert' keyword so the check is not
                // silently skipped when the JVM runs without -ea.
                assertTrue(GridTestUtils.waitForCondition(new PA() {
                    @Override public boolean apply() {
                        return cntr.get() == keyCnt;
                    }
                }, 2000L));

                synchronized (vals) {
                    // Partition counter must always be exactly one ahead of the stored value.
                    for (T2<Integer, Long> val : vals) assertEquals((long)val.get1() + 1, (long)val.get2());
                }
            }
        }
        else {
            // Updates with no active query still advance the partition counter.
            for (int key = 0; key < keyCnt; key++) cache.put(updateKey, cache.get(updateKey) + 1);
        }
    }
}
Usage of org.apache.ignite.internal.util.typedef.PA in the Apache Ignite project.
From class CacheContinuousQueryOperationFromCallbackTest, method doTest.
/**
 * Core test scenario: registers a continuous query on every node, hammers the
 * cache from multiple threads, then verifies that events were received and that
 * cache operations performed from listener (or filter) callbacks also produced
 * the expected events.
 *
 * @param ccfg Cache configuration.
 * @param fromLsnr If {@code true}, callback operations run from the local listener;
 *        otherwise from the remote filter.
 * @throws Exception If failed.
 */
protected void doTest(final CacheConfiguration ccfg, boolean fromLsnr) throws Exception {
    ignite(0).createCache(ccfg);

    List<QueryCursor<?>> qries = new ArrayList<>();

    assertEquals(0, filterCbCntr.get());

    try {
        List<Set<T2<QueryTestKey, QueryTestValue>>> rcvdEvts = new ArrayList<>(NODES);
        List<Set<T2<QueryTestKey, QueryTestValue>>> evtsFromCallbacks = new ArrayList<>(NODES);

        final AtomicInteger qryCntr = new AtomicInteger(0);
        final AtomicInteger cbCntr = new AtomicInteger(0);

        final int threadCnt = SYSTEM_POOL_SIZE * 2;

        // One continuous query per node, each collecting into its own concurrent set.
        for (int idx = 0; idx < NODES; idx++) {
            Set<T2<QueryTestKey, QueryTestValue>> evts = Collections.newSetFromMap(new ConcurrentHashMap<T2<QueryTestKey, QueryTestValue>, Boolean>());
            Set<T2<QueryTestKey, QueryTestValue>> evtsFromCb = Collections.newSetFromMap(new ConcurrentHashMap<T2<QueryTestKey, QueryTestValue>, Boolean>());

            IgniteCache<Object, Object> cache = grid(idx).getOrCreateCache(ccfg.getName());

            ContinuousQuery qry = new ContinuousQuery();

            // In listener mode the listener itself performs cache operations; otherwise
            // the remote filter does (and the listener gets no cache reference).
            qry.setLocalListener(new TestCacheAsyncEventListener(evts, evtsFromCb, fromLsnr ? cache : null, qryCntr, cbCntr));

            if (!fromLsnr)
                qry.setRemoteFilterFactory(FactoryBuilder.factoryOf(new CacheTestRemoteFilterAsync(ccfg.getName())));

            rcvdEvts.add(evts);
            evtsFromCallbacks.add(evtsFromCb);

            QueryCursor qryCursor = cache.query(qry);

            qries.add(qryCursor);
        }

        // Concurrent updates: random keys, random node, optionally transactional.
        IgniteInternalFuture<Long> f = GridTestUtils.runMultiThreadedAsync(new Runnable() {
            @Override public void run() {
                ThreadLocalRandom rnd = ThreadLocalRandom.current();

                for (int i = 0; i < ITERATION_CNT; i++) {
                    IgniteCache<QueryTestKey, QueryTestValue> cache = grid(rnd.nextInt(NODES)).cache(ccfg.getName());

                    QueryTestKey key = new QueryTestKey(rnd.nextInt(KEYS));

                    boolean startTx = cache.getConfiguration(CacheConfiguration.class).getAtomicityMode() == TRANSACTIONAL && rnd.nextBoolean();

                    Transaction tx = null;

                    if (startTx)
                        tx = cache.unwrap(Ignite.class).transactions().txStart();

                    try {
                        if ((cache.get(key) == null) || rnd.nextBoolean())
                            cache.invoke(key, new IncrementTestEntryProcessor());
                        else {
                            QueryTestValue val;
                            QueryTestValue newVal;

                            // CAS loop: retry until the replace succeeds against the current value.
                            do {
                                val = cache.get(key);

                                newVal = val == null ? new QueryTestValue(0) : new QueryTestValue(val.val1 + 1);
                            }
                            while (!cache.replace(key, val, newVal));
                        }
                    }
                    finally {
                        if (tx != null)
                            tx.commit();
                    }
                }
            }
        }, threadCnt, "put-thread");

        f.get(30, TimeUnit.SECONDS);

        // Use assertTrue instead of the bare 'assert' keyword so the check is not
        // silently skipped when the JVM runs without -ea.
        assertTrue(GridTestUtils.waitForCondition(new PA() {
            @Override public boolean apply() {
                return qryCntr.get() >= ITERATION_CNT * threadCnt * NODES;
            }
        }, TimeUnit.MINUTES.toMillis(2)));

        for (Set<T2<QueryTestKey, QueryTestValue>> set : rcvdEvts) checkEvents(set, ITERATION_CNT * threadCnt, grid(0).cache(ccfg.getName()), false);

        if (fromLsnr) {
            final int expCnt = qryCntr.get() * NODES * KEYS_FROM_CALLBACK;

            boolean res = GridTestUtils.waitForCondition(new PA() {
                @Override public boolean apply() {
                    return cbCntr.get() >= expCnt;
                }
            }, TimeUnit.SECONDS.toMillis(60));

            assertTrue("Failed to wait events [exp=" + expCnt + ", act=" + cbCntr.get() + "]", res);

            assertEquals(expCnt, cbCntr.get());

            for (Set<T2<QueryTestKey, QueryTestValue>> set : evtsFromCallbacks) checkEvents(set, qryCntr.get() * KEYS_FROM_CALLBACK, grid(0).cache(ccfg.getName()), true);
        }
        else {
            // Filter fires on every owning node: backups + 1 primary (or NODES - 1 for replicated).
            final int expInvkCnt = ITERATION_CNT * threadCnt * (ccfg.getCacheMode() != REPLICATED ? (ccfg.getBackups() + 1) : NODES - 1) * NODES;

            // Wait result intentionally ignored: the strict assertEquals below reports
            // the exact mismatch on timeout.
            GridTestUtils.waitForCondition(new PA() {
                @Override public boolean apply() {
                    return filterCbCntr.get() >= expInvkCnt;
                }
            }, TimeUnit.SECONDS.toMillis(60));

            assertEquals(expInvkCnt, filterCbCntr.get());

            for (Set<T2<QueryTestKey, QueryTestValue>> set : evtsFromCallbacks) checkEvents(set, expInvkCnt * KEYS_FROM_CALLBACK, grid(0).cache(ccfg.getName()), true);
        }
    }
    finally {
        for (QueryCursor<?> qry : qries) qry.close();

        ignite(0).destroyCache(ccfg.getName());
    }
}
Aggregations