Use of javax.cache.Cache in project ignite by apache.
The class GridCacheQueryTransformerSelfTest, method testLocalFiltered.
/**
 * @throws Exception If failed.
 */
@Test
public void testLocalFiltered() throws Exception {
    IgniteCache<Integer, Value> cache = grid().createCache("test-cache");

    try {
        for (int i = 0; i < 50; i++)
            cache.put(i, new Value("str" + i, i * 100));

        Collection<List<Integer>> lists = grid().compute().broadcast(new IgniteCallable<List<Integer>>() {
            @IgniteInstanceResource
            private Ignite ignite;

            @Override public List<Integer> call() throws Exception {
                IgniteBiPredicate<Integer, Value> filter = new IgniteBiPredicate<Integer, Value>() {
                    @Override public boolean apply(Integer k, Value v) {
                        return v.idx % 1000 == 0;
                    }
                };

                IgniteClosure<Cache.Entry<Integer, Value>, Integer> transformer =
                    new IgniteClosure<Cache.Entry<Integer, Value>, Integer>() {
                        @Override public Integer apply(Cache.Entry<Integer, Value> e) {
                            return e.getValue().idx;
                        }
                    };

                return ignite.cache("test-cache").query(new ScanQuery<>(filter).setLocal(true), transformer).getAll();
            }
        });

        List<Integer> res = new ArrayList<>(F.flatCollections(lists));

        assertEquals(5, res.size());

        Collections.sort(res);

        for (int i = 0; i < 5; i++)
            assertEquals(i * 1000, res.get(i).intValue());
    }
    finally {
        cache.destroy();
    }
}
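For comparison, here is a minimal standalone sketch of the same ScanQuery-plus-transformer pattern, written with lambdas instead of anonymous classes. The cache name "people" and the Person class are hypothetical, chosen only for illustration; the API calls (ScanQuery and the two-argument query() overload) are the same ones used in the test above.

import java.util.List;

import javax.cache.Cache;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.lang.IgniteClosure;

public class ScanTransformExample {
    /** Hypothetical value class. */
    static class Person {
        final String name;
        final int age;

        Person(String name, int age) {
            this.name = name;
            this.age = age;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Person> cache = ignite.createCache("people");

            cache.put(1, new Person("Alice", 42));
            cache.put(2, new Person("Bob", 17));

            // The filter runs on the nodes that own the data; the transformer
            // projects each matching entry to a single field, so whole Person
            // objects are never serialized back to the caller.
            IgniteBiPredicate<Integer, Person> adults = (k, p) -> p.age >= 18;

            IgniteClosure<Cache.Entry<Integer, Person>, String> names = e -> e.getValue().name;

            List<String> res = cache.query(new ScanQuery<>(adults), names).getAll();

            System.out.println(res); // [Alice]
        }
    }
}

This is the reason the test's transformer returns only e.getValue().idx: the projection happens remotely, so only the extracted integers cross the network.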
Use of javax.cache.Cache in project ignite by apache.
The class CacheMvccSqlTxQueriesAbstractTest, method testUpdateExplicitPartitionsWithReducer.
/**
 * @throws Exception If failed.
 */
@Test
public void testUpdateExplicitPartitionsWithReducer() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, 10).setIndexedTypes(Integer.class, Integer.class);

    Ignite ignite = startGridsMultiThreaded(4);

    awaitPartitionMapExchange();

    IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);

    Affinity<Object> affinity = internalCache0(cache).affinity();

    int keysCnt = 10, retryCnt = 0;

    Integer test = 0;

    Map<Integer, Integer> vals = new LinkedHashMap<>();

    while (vals.size() < keysCnt) {
        int partition = affinity.partition(test);

        if (partition == 1 || partition == 2)
            vals.put(test, 0);
        else
            assertTrue("Maximum retry number exceeded", ++retryCnt < 1000);

        test++;
    }

    cache.putAll(vals);

    SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer set _val=(SELECT 2 FROM DUAL)").setPartitions(1, 2);

    List<List<?>> all = cache.query(qry).getAll();

    assertEquals(Long.valueOf(keysCnt), all.stream().findFirst().orElseThrow(AssertionError::new).get(0));

    List<List<?>> rows = cache.query(new SqlFieldsQuery("SELECT _val FROM Integer")).getAll();

    assertEquals(keysCnt, rows.size());

    assertTrue(rows.stream().map(r -> r.get(0)).map(Integer.class::cast).allMatch(v -> v == 2));
}
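The key API here is SqlFieldsQuery.setPartitions(), which routes a SQL statement only to the named partitions; keys whose affinity maps elsewhere are untouched. A minimal sketch of that pattern under simpler assumptions: a single node and a hypothetical cache named "ints", without the DUAL subquery or the multi-node MVCC setup of the test above.

import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class PartitionedUpdateExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg =
                new CacheConfiguration<Integer, Integer>("ints")
                    .setIndexedTypes(Integer.class, Integer.class);

            IgniteCache<Integer, Integer> cache = ignite.createCache(ccfg);

            for (int i = 0; i < 100; i++)
                cache.put(i, 0);

            // Only rows living in partitions 1 and 2 are updated.
            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val = 2")
                .setPartitions(1, 2);

            // A DML statement returns a single row whose first column is the
            // update count as a Long -- the same value the test asserts on.
            List<List<?>> res = cache.query(qry).getAll();

            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}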
Use of javax.cache.Cache in project ignite by apache.
The class CacheMvccSqlTxQueriesAbstractTest, method testIterator.
/**
 * @throws Exception If failed.
 */
@Test
public void testIterator() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
        .setIndexedTypes(Integer.class, Integer.class);

    startGrid(getConfiguration("grid").setMvccVacuumFrequency(Integer.MAX_VALUE));

    Ignite client = startClientGrid(getConfiguration("client"));

    IgniteCache<Object, Object> cache = client.cache(DEFAULT_CACHE_NAME);

    cache.put(1, 1);
    cache.put(2, 2);
    cache.put(3, 3);
    cache.put(4, 4);

    List<List<?>> res;

    try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        tx.timeout(TX_TIMEOUT);

        res = cache.query(new SqlFieldsQuery(
            "UPDATE Integer SET _val = CASE _key WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 30 END")).getAll();

        assertEquals(4L, res.get(0).get(0));

        tx.rollback();
    }

    try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        tx.timeout(TX_TIMEOUT);

        res = cache.query(new SqlFieldsQuery(
            "UPDATE Integer SET _val = CASE _val WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 30 END")).getAll();

        assertEquals(4L, res.get(0).get(0));

        res = cache.query(new SqlFieldsQuery(
            "UPDATE Integer SET _val = CASE _val WHEN 10 THEN 100 WHEN 20 THEN 200 ELSE 300 END")).getAll();

        assertEquals(4L, res.get(0).get(0));

        res = cache.query(new SqlFieldsQuery("DELETE FROM Integer WHERE _key = 4")).getAll();

        assertEquals(1L, res.get(0).get(0));

        tx.commit();
    }

    IgniteCache<Integer, Integer> cache0 = client.cache(DEFAULT_CACHE_NAME);

    Iterator<Cache.Entry<Integer, Integer>> it = cache0.iterator();

    Map<Integer, Integer> map = new HashMap<>();

    while (it.hasNext()) {
        Cache.Entry<Integer, Integer> e = it.next();

        assertNull("duplicate key returned from iterator", map.putIfAbsent(e.getKey(), e.getValue()));
    }

    assertEquals(3, map.size());

    assertEquals(100, map.get(1).intValue());
    assertEquals(200, map.get(2).intValue());
    assertEquals(300, map.get(3).intValue());
}
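The final block of the test relies on IgniteCache implementing javax.cache.Cache, which extends Iterable<Cache.Entry<K, V>>, so a cache can be walked with the standard JCache iterator. A minimal sketch of that iterate-and-deduplicate pattern in isolation; the cache name "nums" is hypothetical.

import java.util.HashMap;
import java.util.Map;

import javax.cache.Cache;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

public class IteratorExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.createCache("nums");

            for (int i = 1; i <= 3; i++)
                cache.put(i, i * 100);

            Map<Integer, Integer> snapshot = new HashMap<>();

            // Each key should be observed exactly once; putIfAbsent returning
            // non-null would mean the iterator yielded a duplicate, which is
            // exactly what the test's assertNull guards against.
            for (Cache.Entry<Integer, Integer> e : cache) {
                Integer prev = snapshot.putIfAbsent(e.getKey(), e.getValue());

                assert prev == null : "duplicate key: " + e.getKey();
            }

            System.out.println(snapshot); // {1=100, 2=200, 3=300}
        }
    }
}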
Use of javax.cache.Cache in project ignite by apache.
The class CacheAbstractJdbcStore, method writeAll.
/** {@inheritDoc} */
@Override public void writeAll(final Collection<Cache.Entry<? extends K, ? extends V>> entries)
    throws CacheWriterException {
    assert entries != null;

    Connection conn = null;

    try {
        conn = connection();

        String cacheName = session().cacheName();

        Object currKeyTypeId = null;

        if (dialect.hasMerge()) {
            PreparedStatement mergeStmt = null;

            try {
                EntryMapping em = null;

                LazyValue<Object[]> lazyEntries = new LazyValue<Object[]>() {
                    @Override public Object[] create() {
                        return entries.toArray();
                    }
                };

                int fromIdx = 0, prepared = 0;

                for (Cache.Entry<? extends K, ? extends V> entry : entries) {
                    K key = entry.getKey();

                    Object keyTypeId = typeIdForObject(key);

                    em = entryMapping(cacheName, keyTypeId);

                    if (currKeyTypeId == null || !currKeyTypeId.equals(keyTypeId)) {
                        if (mergeStmt != null) {
                            if (log.isDebugEnabled())
                                log.debug("Write entries to db [cache=" + U.maskName(cacheName) +
                                    ", keyType=" + em.keyType() + ", cnt=" + prepared + "]");

                            executeBatch(em, mergeStmt, "writeAll", fromIdx, prepared, lazyEntries);

                            U.closeQuiet(mergeStmt);
                        }

                        mergeStmt = conn.prepareStatement(em.mergeQry);

                        currKeyTypeId = keyTypeId;

                        fromIdx += prepared;

                        prepared = 0;
                    }

                    int idx = fillKeyParameters(mergeStmt, em, key);

                    fillValueParameters(mergeStmt, idx, em, entry.getValue());

                    mergeStmt.addBatch();

                    if (++prepared % batchSize == 0) {
                        if (log.isDebugEnabled())
                            log.debug("Write entries to db [cache=" + U.maskName(cacheName) +
                                ", keyType=" + em.keyType() + ", cnt=" + prepared + "]");

                        executeBatch(em, mergeStmt, "writeAll", fromIdx, prepared, lazyEntries);

                        fromIdx += prepared;

                        prepared = 0;
                    }
                }

                if (mergeStmt != null && prepared % batchSize != 0) {
                    if (log.isDebugEnabled())
                        log.debug("Write entries to db [cache=" + U.maskName(cacheName) +
                            ", keyType=" + em.keyType() + ", cnt=" + prepared + "]");

                    executeBatch(em, mergeStmt, "writeAll", fromIdx, prepared, lazyEntries);
                }
            }
            finally {
                U.closeQuiet(mergeStmt);
            }
        }
        else {
            if (log.isDebugEnabled())
                log.debug("Write entries to db one by one using update and insert statements " +
                    "[cache=" + U.maskName(cacheName) + ", cnt=" + entries.size() + "]");

            PreparedStatement insStmt = null;
            PreparedStatement updStmt = null;

            try {
                for (Cache.Entry<? extends K, ? extends V> entry : entries) {
                    K key = entry.getKey();

                    Object keyTypeId = typeIdForObject(key);

                    EntryMapping em = entryMapping(cacheName, keyTypeId);

                    if (currKeyTypeId == null || !currKeyTypeId.equals(keyTypeId)) {
                        U.closeQuiet(insStmt);

                        insStmt = conn.prepareStatement(em.insQry);

                        U.closeQuiet(updStmt);

                        updStmt = conn.prepareStatement(em.updQry);

                        currKeyTypeId = keyTypeId;
                    }

                    writeUpsert(insStmt, updStmt, em, entry);
                }
            }
            finally {
                U.closeQuiet(insStmt);
                U.closeQuiet(updStmt);
            }
        }
    }
    catch (SQLException e) {
        throw new CacheWriterException("Failed to write entries in database", e);
    }
    finally {
        closeConnection(conn);
    }
}
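Stripped of the Ignite-specific entry-mapping machinery, writeAll() is the standard JDBC batching pattern: addBatch() per entry, executeBatch() every batchSize entries, plus a final flush for the partial tail. A condensed sketch of that core pattern, assuming a hypothetical PERSON table in an in-memory H2 database.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;

public class BatchWriteExample {
    public static void main(String[] args) throws SQLException {
        int batchSize = 2;
        int prepared = 0;

        Map<Integer, String> entries = Map.of(1, "a", 2, "b", 3, "c");

        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test")) {
            try (Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE PERSON (ID INT PRIMARY KEY, NAME VARCHAR)");
            }

            try (PreparedStatement stmt = conn.prepareStatement(
                "MERGE INTO PERSON (ID, NAME) KEY (ID) VALUES (?, ?)")) {
                for (Map.Entry<Integer, String> e : entries.entrySet()) {
                    stmt.setInt(1, e.getKey());
                    stmt.setString(2, e.getValue());

                    stmt.addBatch();

                    // Flush a full batch (the "++prepared % batchSize == 0" check above).
                    if (++prepared % batchSize == 0) {
                        stmt.executeBatch();

                        prepared = 0;
                    }
                }

                // Flush the tail (the final "prepared % batchSize != 0" flush above).
                if (prepared % batchSize != 0)
                    stmt.executeBatch();
            }
        }
    }
}

The extra complexity in the real method comes from entries of different key types needing different MERGE statements, which is why it re-prepares the statement and tracks fromIdx whenever the key type changes mid-stream.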
Use of javax.cache.Cache in project ignite by apache.
The class CacheDataStructuresManager, method removeSetData.
/**
 * @param setId Set ID.
 * @param topVer Topology version.
 * @throws IgniteCheckedException If failed.
 */
private void removeSetData(IgniteUuid setId, AffinityTopologyVersion topVer) throws IgniteCheckedException {
    boolean loc = cctx.isLocal();

    GridCacheAffinityManager aff = cctx.affinity();

    if (!loc) {
        aff.affinityReadyFuture(topVer).get();

        cctx.preloader().syncFuture().get();
    }

    IgniteInternalCache<?, ?> cache = cctx.cache();

    final int BATCH_SIZE = 100;

    Collection<SetItemKey> keys = new ArrayList<>(BATCH_SIZE);

    for (Cache.Entry entry : cache.localEntries(new CachePeekMode[] {CachePeekMode.PRIMARY})) {
        Object obj = entry.getKey();

        if (!(obj instanceof SetItemKey && setId.equals(((SetItemKey)obj).setId())))
            continue;

        keys.add((SetItemKey)obj);

        if (keys.size() == BATCH_SIZE) {
            retryRemoveAll(cache, keys);

            keys.clear();
        }
    }

    if (!keys.isEmpty())
        retryRemoveAll(cache, keys);
}
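The same collect-then-flush shape can be expressed against the public IgniteCache API: scan local entries, buffer matching keys up to a fixed batch size, and remove each batch in a single call rather than one remove per key. A minimal sketch; the method name and Predicate parameter are illustrative, and the internal retryRemoveAll() used above has no public equivalent, so plain removeAll() stands in for it.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.function.Predicate;

import javax.cache.Cache;

import org.apache.ignite.IgniteCache;

public class BatchedRemove {
    /** Removes all locally held entries whose key matches the predicate, in fixed-size batches. */
    static <K, V> void removeMatching(IgniteCache<K, V> cache, Predicate<K> matches, int batchSize) {
        Collection<K> keys = new ArrayList<>(batchSize);

        // localEntries() restricts the scan to data stored on this node,
        // mirroring cache.localEntries(...) in the method above.
        for (Cache.Entry<K, V> e : cache.localEntries()) {
            if (!matches.test(e.getKey()))
                continue;

            keys.add(e.getKey());

            // Remove a full batch in one network call instead of one per key.
            if (keys.size() == batchSize) {
                cache.removeAll(new HashSet<>(keys));

                keys.clear();
            }
        }

        // Remove the remaining tail, if any.
        if (!keys.isEmpty())
            cache.removeAll(new HashSet<>(keys));
    }
}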