Use of javax.cache.Cache in project hazelcast by hazelcast.
The class CacheExpirationManagerTest, method expiration_task_starts_on_new_node_after_migration_when_there_is_expirable_entry.
@Test
public void expiration_task_starts_on_new_node_after_migration_when_there_is_expirable_entry() {
    Config config = getConfig();
    config.setProperty(taskPeriodSecondsPropName(), "1");
    TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory();
    final HazelcastInstance node1 = factory.newHazelcastInstance(config);
    CacheManager cacheManager = createCacheManager(node1);
    Cache cache = cacheManager.createCache("test", new CacheConfig());
    // Put an entry that expires in one hour so the expiration task has work to do.
    ((ICache) cache).put(1, 1, new HazelcastExpiryPolicy(ONE_HOUR, ONE_HOUR, ONE_HOUR));
    final HazelcastInstance node2 = factory.newHazelcastInstance(config);
    // Shutting down node1 migrates its partitions, including the expirable entry, to node2.
    node1.shutdown();
    assertTrueEventually(new AssertTask() {
        @Override
        public void run() {
            assertTrue("There should be one ClearExpiredRecordsTask started", hasClearExpiredRecordsTaskStarted(node2));
        }
    });
}
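For orientation, here is a minimal standalone sketch (not part of the Hazelcast test above) of the same javax.cache.Cache pattern: obtain the JCache CacheManager through javax.cache.Caching, create a cache, and unwrap it to ICache so a per-entry HazelcastExpiryPolicy can be passed to put. The class name, cache name, and one-hour constant are illustrative assumptions.

import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.Caching;
import com.hazelcast.cache.HazelcastExpiryPolicy;
import com.hazelcast.cache.ICache;
import com.hazelcast.config.CacheConfig;

public class ExpiryPolicySketch {
    public static void main(String[] args) {
        // With hazelcast on the classpath, the default CachingProvider is Hazelcast's.
        CacheManager cacheManager = Caching.getCachingProvider().getCacheManager();
        Cache<Integer, Integer> cache = cacheManager.createCache("example", new CacheConfig<Integer, Integer>());
        // Unwrap to ICache to pass a per-entry expiry policy (create/update/access TTLs in millis).
        ICache<Integer, Integer> iCache = cache.unwrap(ICache.class);
        long oneHourMs = 60L * 60 * 1000;
        iCache.put(1, 1, new HazelcastExpiryPolicy(oneHourMs, oneHourMs, oneHourMs));
    }
}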
Use of javax.cache.Cache in project ignite by apache.
The class CacheDataPageScanQueryTest, method testDataPageScanWithRestart.
/**
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
@Test
@Ignore("https://issues.apache.org/jira/browse/IGNITE-11998")
public void testDataPageScanWithRestart() throws Exception {
    IgniteEx ignite = startGrid(0);
    ignite.cluster().active(true);
    IgniteInternalCache<Long, String> cache = ignite.cachex(CACHE);
    CacheGroupMetricsImpl metrics = cache.context().group().metrics();
    DataRegionMetricsImpl rmx = cache.context().dataRegion().metrics();
    long maxKey = 10_000;
    Map<Long, String> map = new ConcurrentHashMap<>();
    int threads = 16;
    AtomicInteger threadShift = new AtomicInteger();
    multithreaded((Callable<Void>) () -> {
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        int shift = threadShift.getAndIncrement();
        for (int i = shift; i < maxKey; i += threads) {
            Long k = (long) i;
            // Bigger than single page.
            String v = GridTestUtils.randomString(rnd, 6 * 1024);
            cache.put(k, v);
            map.put(k, v);
        }
        return null;
    }, threads);
    assertEquals(map.size(), cache.size());
    info("Page mem : " + rmx.getPhysicalMemorySize());
    info("Alloc size: " + metrics.getTotalAllocatedSize());
    info("Store size: " + metrics.getStorageSize());
    HashMap<Long, String> map2 = new HashMap<>(map);
    IgniteCache<Long, String> c = ignite.cache(CACHE);
    for (Cache.Entry<Long, String> e : c.query(new ScanQuery<Long, String>()).getAll())
        assertEquals(e.getValue(), map.remove(e.getKey()));
    assertTrue(map.isEmpty());
    assertTrue(CacheDataTree.isLastFindWithDataPageScan());
    stopAllGrids(true);
    ignite = startGrid(0);
    ignite.cluster().active(true);
    c = ignite.cache(CACHE);
    for (Cache.Entry<Long, String> e : c.query(new ScanQuery<Long, String>()).getAll())
        assertEquals(e.getValue(), map2.remove(e.getKey()));
    assertTrue(map2.isEmpty());
    assertTrue(CacheDataTree.isLastFindWithDataPageScan());
}
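Stripped of the test harness, the javax.cache.Cache usage in this test is the iteration over Cache.Entry results of a ScanQuery. A minimal sketch, assuming a node started with Ignition.start() and an illustrative cache name "myCache":

import javax.cache.Cache;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ScanQuery;

public class ScanQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Long, String> cache = ignite.getOrCreateCache("myCache");
            cache.put(1L, "value1");
            cache.put(2L, "value2");
            // ScanQuery results are plain javax.cache.Cache.Entry instances.
            for (Cache.Entry<Long, String> e : cache.query(new ScanQuery<Long, String>()).getAll())
                System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}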
Use of javax.cache.Cache in project ignite by apache.
The class H2IndexingAbstractGeoSelfTest, method checkGeoMultithreaded.
/**
 * Check geo indexing multithreaded with dynamic index creation.
 *
 * @param dynamic Whether index should be created dynamically.
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
private void checkGeoMultithreaded(boolean dynamic) throws Exception {
    final IgniteCache<Integer, EnemyCamp> cache1 = createCache("camp", true, Integer.class, EnemyCamp.class, dynamic);
    final IgniteCache<Integer, EnemyCamp> cache2 = grid(1).cache("camp");
    final IgniteCache<Integer, EnemyCamp> cache3 = grid(2).cache("camp");
    try {
        final String[] points = new String[CNT];
        WKTReader r = new WKTReader();
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        for (int idx = 0; idx < CNT; idx++) {
            int x = rnd.nextInt(1, 100);
            int y = rnd.nextInt(1, 100);
            cache1.getAndPut(idx, new EnemyCamp(r.read("POINT(" + x + " " + y + ")"), Integer.toString(idx)));
            points[idx] = Integer.toString(idx);
        }
        Thread.sleep(200);
        final AtomicBoolean stop = new AtomicBoolean();
        final AtomicReference<Exception> err = new AtomicReference<>();
        IgniteInternalFuture<?> putFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                WKTReader r = new WKTReader();
                ThreadLocalRandom rnd = ThreadLocalRandom.current();
                while (!stop.get()) {
                    int cacheIdx = rnd.nextInt(0, 3);
                    IgniteCache<Integer, EnemyCamp> cache = cacheIdx == 0 ? cache1 : cacheIdx == 1 ? cache2 : cache3;
                    int idx = rnd.nextInt(CNT);
                    int x = rnd.nextInt(1, 100);
                    int y = rnd.nextInt(1, 100);
                    cache.getAndPut(idx, new EnemyCamp(r.read("POINT(" + x + " " + y + ")"), Integer.toString(idx)));
                    U.sleep(50);
                }
                return null;
            }
        }, Runtime.getRuntime().availableProcessors(), "put-thread");
        IgniteInternalFuture<?> qryFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                WKTReader r = new WKTReader();
                ThreadLocalRandom rnd = ThreadLocalRandom.current();
                while (!stop.get()) {
                    try {
                        int cacheIdx = rnd.nextInt(0, 3);
                        IgniteCache<Integer, EnemyCamp> cache = cacheIdx == 0 ? cache1 : cacheIdx == 1 ? cache2 : cache3;
                        SqlQuery<Integer, EnemyCamp> qry = new SqlQuery<>(EnemyCamp.class, "coords && ?");
                        Collection<Cache.Entry<Integer, EnemyCamp>> res = cache.query(qry.setArgs(r.read("POLYGON((0 0, 0 100, 100 100, 100 0, 0 0))"))).getAll();
                        checkPoints(res, points);
                        U.sleep(5);
                    } catch (Exception e) {
                        err.set(e);
                        stop.set(true);
                        break;
                    }
                }
                return null;
            }
        }, 4, "qry-thread");
        U.sleep(6000L);
        stop.set(true);
        putFut.get();
        qryFut.get();
        Exception err0 = err.get();
        if (err0 != null)
            throw err0;
    } finally {
        destroy(cache1, grid(0), dynamic);
    }
}
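The javax.cache.Cache piece here is the Collection<Cache.Entry<...>> returned by a geospatial SqlQuery. Below is a hedged sketch of that pattern outside the test; it assumes the ignite-geospatial module is on the classpath, uses an illustrative Camp value class in place of EnemyCamp, and imports JTS from org.locationtech.jts (older Ignite versions use com.vividsolutions.jts instead):

import java.util.Collection;
import javax.cache.Cache;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlQuery;
import org.apache.ignite.cache.query.annotations.QuerySqlField;
import org.apache.ignite.configuration.CacheConfiguration;
import org.locationtech.jts.geom.Geometry;
import org.locationtech.jts.io.WKTReader;

public class GeoQuerySketch {
    /** Value type with a spatially indexed geometry field, standing in for EnemyCamp. */
    static class Camp {
        @QuerySqlField(index = true)
        Geometry coords;

        Camp(Geometry coords) {
            this.coords = coords;
        }
    }

    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Camp> ccfg = new CacheConfiguration<>("camp");
            ccfg.setIndexedTypes(Integer.class, Camp.class);
            IgniteCache<Integer, Camp> cache = ignite.getOrCreateCache(ccfg);

            WKTReader r = new WKTReader();
            cache.put(1, new Camp(r.read("POINT(10 20)")));

            // "&&" is the H2 spatial intersection operator used by the test above.
            SqlQuery<Integer, Camp> qry = new SqlQuery<>(Camp.class, "coords && ?");
            Collection<Cache.Entry<Integer, Camp>> res =
                cache.query(qry.setArgs(r.read("POLYGON((0 0, 0 100, 100 100, 100 0, 0 0))"))).getAll();
            System.out.println("Camps in region: " + res.size());
        }
    }
}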
Use of javax.cache.Cache in project ignite by apache.
The class IgniteCacheLocalQuerySelfTest, method testQueryLocal.
/**
 * @throws Exception If test failed.
 */
@Test
public void testQueryLocal() throws Exception {
    // Loop twice to check cached prepared statements; with a caching bug the second pass would fail.
    for (int i = 0; i < 2; i++) {
        IgniteCache<Integer, String> cache = jcache(Integer.class, String.class);
        cache.put(1, "value1");
        cache.put(2, "value2");
        cache.put(3, "value3");
        cache.put(4, "value4");
        cache.put(5, "value5");
        // Tests equals query.
        QueryCursor<Cache.Entry<Integer, String>> qry =
            cache.query(new SqlQuery<Integer, String>(String.class, "_val='value1'").setLocal(true));
        Iterator<Cache.Entry<Integer, String>> iter = qry.iterator();
        Cache.Entry<Integer, String> entry = iter.next();
        assert !iter.hasNext();
        assert entry != null;
        assert entry.getKey() == 1;
        assert "value1".equals(entry.getValue());
        // Tests like query.
        qry = cache.query(new SqlQuery<Integer, String>(String.class, "_val like 'value%'").setLocal(true));
        iter = qry.iterator();
        assert iter.next() != null;
        assert iter.next() != null;
        assert iter.next() != null;
        assert iter.next() != null;
        assert iter.next() != null;
        assert !iter.hasNext();
        // Test explain for primitive index.
        List<List<?>> res = cache.query(new SqlFieldsQuery("explain select _key from String where _val > 'value1'").setLocal(true)).getAll();
        assertTrue("__ explain: \n" + res, ((String) res.get(0).get(0)).toLowerCase().contains("_val_idx"));
        cache.destroy();
    }
}
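As a minimal sketch of the same local-query pattern outside the test harness (the cache name "strings" and the started node are illustrative assumptions), a SqlQuery with setLocal(true) returns javax.cache Cache.Entry results from the local node only:

import javax.cache.Cache;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.SqlQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class LocalSqlQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("strings");
            // SqlQuery requires the key/value types to be registered for SQL.
            ccfg.setIndexedTypes(Integer.class, String.class);
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);
            cache.put(1, "value1");
            cache.put(2, "value2");
            // setLocal(true) restricts the query to data stored on this node.
            QueryCursor<Cache.Entry<Integer, String>> cur =
                cache.query(new SqlQuery<Integer, String>(String.class, "_val = 'value1'").setLocal(true));
            for (Cache.Entry<Integer, String> e : cur)
                System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}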
Use of javax.cache.Cache in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapsClosed.
/**
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapsClosed(CacheMode cacheMode) throws Exception {
    testSpi = true;
    int srvCnt = 4;
    startGridsMultiThreaded(srvCnt);
    IgniteEx nearNode = grid(srvCnt - 1);
    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, srvCnt - 1, srvCnt).setIndexedTypes(Integer.class, Integer.class));
    IgniteEx primary = grid(0);
    Affinity<Object> aff = nearNode.affinity(cache.getName());
    int[] nearBackupParts = aff.backupPartitions(nearNode.localNode());
    int[] primaryParts = aff.primaryPartitions(primary.localNode());
    Collection<Integer> nearSet = new HashSet<>();
    for (int part : nearBackupParts)
        nearSet.add(part);
    Collection<Integer> primarySet = new HashSet<>();
    for (int part : primaryParts)
        primarySet.add(part);
    // We need backup partitions on the near node.
    nearSet.retainAll(primarySet);
    List<Integer> keys = singlePartKeys(primary.cache(DEFAULT_CACHE_NAME), 20, nearSet.iterator().next());
    int range = 3;
    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();
    CountDownLatch latch = new CountDownLatch(range * 2);
    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override
        public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);
                latch.countDown();
            }
        }
    });
    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);
    // Prevent first transaction prepare on backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);
    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        private final AtomicInteger limiter = new AtomicInteger();

        @Override
        public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return limiter.getAndIncrement() < srvCnt - 1;
            return false;
        }
    });
    Transaction txA = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);
    for (int i = 0; i < range; i++)
        primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 2);
    txA.commitAsync();
    GridTestUtils.runAsync(() -> {
        try (Transaction tx = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range; i < range * 2; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 1);
            tx.commit();
        }
    }).get();
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            return primary.context().cache().context().tm().activeTransactions().stream().allMatch(tx -> tx.state() == PREPARING);
        }
    }, 3_000);
    GridTestUtils.runAsync(() -> {
        try (Transaction txB = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range * 2; i < range * 3; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 3);
            txB.commit();
        }
    }).get();
    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));
    assertEquals(range * 3, primaryUpdCntr);
    // Drop primary.
    stopGrid(primary.name());
    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            boolean allRolledBack = true;
            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream().allMatch(tx -> tx.state() == ROLLED_BACK);
                allRolledBack &= rolledBack;
            }
            return allRolledBack;
        }
    }, 3_000);
    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);
        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();
        long backupCntr = getUpdateCounter(grid(i), keys.get(0));
        assertEquals(range * 2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }
    assertTrue(latch.await(5, SECONDS));
    assertEquals(range * 2, arrivedEvts.size());
    cur.close();
}
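Reduced to its javax.cache surface, the continuous-query part of this test is a local listener receiving CacheEntryEvent batches through a QueryCursor of Cache.Entry. A minimal sketch, with an illustrative cache name "events" and a plain Ignition.start() node:

import javax.cache.Cache;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryUpdatedListener;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class ContinuousQuerySketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("events");
            ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
            // The local listener receives javax.cache CacheEntryEvent batches.
            qry.setLocalListener((CacheEntryUpdatedListener<Integer, Integer>) evts -> {
                for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts)
                    System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());
            });
            // The returned cursor must stay open for as long as events should be delivered.
            try (QueryCursor<Cache.Entry<Integer, Integer>> cur = cache.query(qry)) {
                cache.put(1, 1);
                cache.put(2, 2);
                Thread.sleep(1_000); // Give the listener a moment to fire before shutdown.
            }
        }
    }
}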