Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project Ignite by Apache.
The class CassandraDirectPersistenceTest, method blobStrategyTest.
/** */
@Test
@SuppressWarnings("unchecked")
public void blobStrategyTest() {
    CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
        new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml"),
        CassandraHelper.getAdminDataSrc());

    CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
        new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml"),
        CassandraHelper.getAdminDataSrc());

    CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
        new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml"),
        CassandraHelper.getAdminDataSrc());

    Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
    Collection<CacheEntryImpl<Long, Person>> personEntries = TestsHelper.generateLongsPersonsEntries();

    LOGGER.info("Running BLOB strategy write tests");
    LOGGER.info("Running single write operation tests");

    store1.write(longEntries.iterator().next());
    store2.write(personEntries.iterator().next());
    store3.write(personEntries.iterator().next());

    LOGGER.info("Single write operation tests passed");
    LOGGER.info("Running bulk write operation tests");

    store1.writeAll(longEntries);
    store2.writeAll(personEntries);
    store3.writeAll(personEntries);

    LOGGER.info("Bulk write operation tests passed");
    LOGGER.info("BLOB strategy write tests passed");

    LOGGER.info("Running BLOB strategy read tests");
    LOGGER.info("Running single read operation tests");

    Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());

    if (!longEntries.iterator().next().getValue().equals(longVal))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");

    Person personVal = (Person)store2.load(personEntries.iterator().next().getKey());

    if (!personEntries.iterator().next().getValue().equals(personVal))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    personVal = (Person)store3.load(personEntries.iterator().next().getKey());

    if (!personEntries.iterator().next().getValue().equals(personVal))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    LOGGER.info("Single read operation tests passed");
    LOGGER.info("Running bulk read operation tests");

    Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));

    if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");

    Map personValues = store2.loadAll(TestsHelper.getKeys(personEntries));

    if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    personValues = store3.loadAll(TestsHelper.getKeys(personEntries));

    if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    LOGGER.info("Bulk read operation tests passed");
    LOGGER.info("BLOB strategy read tests passed");

    LOGGER.info("Running BLOB strategy delete tests");

    store1.delete(longEntries.iterator().next().getKey());
    store1.deleteAll(TestsHelper.getKeys(longEntries));

    store2.delete(personEntries.iterator().next().getKey());
    store2.deleteAll(TestsHelper.getKeys(personEntries));

    store3.delete(personEntries.iterator().next().getKey());
    store3.deleteAll(TestsHelper.getKeys(personEntries));

    LOGGER.info("BLOB strategy delete tests passed");
}
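For orientation, a minimal hand-rolled sketch of the kind of entry collection the TestsHelper generators produce and how it feeds the CacheStore API; the batch size and value function here are illustrative assumptions, not what TestsHelper actually does.

// Hypothetical stand-in for TestsHelper.generateLongsEntries(): CacheStore.write()/writeAll()
// accept any javax.cache.Cache.Entry, and CacheEntryImpl is the simplest concrete implementation.
Collection<CacheEntryImpl<Long, Long>> entries = new ArrayList<>();

for (long i = 0; i < 100; i++) // batch size chosen arbitrarily for the sketch
    entries.add(new CacheEntryImpl<>(i, i * 2));

store1.writeAll(entries); // persisted via the BLOB strategy from persistence-settings-1.xml

Long loaded = (Long)store1.load(entries.iterator().next().getKey());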
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project Ignite by Apache.
The class IgniteH2Indexing, method queryDistributedSql.
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public <K, V> QueryCursor<Cache.Entry<K, V>> queryDistributedSql(String schemaName, SqlQuery qry,
    boolean keepBinary, int mainCacheId) {
    String type = qry.getType();

    H2TableDescriptor tblDesc = tableDescriptor(schemaName, type);

    if (tblDesc == null)
        throw new IgniteSQLException("Failed to find SQL table for type: " + type,
            IgniteQueryErrorCode.TABLE_NOT_FOUND);

    String sql;

    try {
        sql = generateQuery(qry.getSql(), qry.getAlias(), tblDesc);
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }

    SqlFieldsQuery fqry = new SqlFieldsQuery(sql);

    fqry.setArgs(qry.getArgs());
    fqry.setPageSize(qry.getPageSize());
    fqry.setDistributedJoins(qry.isDistributedJoins());
    fqry.setPartitions(qry.getPartitions());
    fqry.setLocal(qry.isLocal());

    if (qry.getTimeout() > 0)
        fqry.setTimeout(qry.getTimeout(), TimeUnit.MILLISECONDS);

    final QueryCursor<List<?>> res = queryDistributedSqlFields(schemaName, fqry, keepBinary, null, mainCacheId);

    final Iterable<Cache.Entry<K, V>> converted = new Iterable<Cache.Entry<K, V>>() {
        @Override public Iterator<Cache.Entry<K, V>> iterator() {
            final Iterator<List<?>> iter0 = res.iterator();

            return new Iterator<Cache.Entry<K, V>>() {
                @Override public boolean hasNext() {
                    return iter0.hasNext();
                }

                @Override public Cache.Entry<K, V> next() {
                    List<?> l = iter0.next();

                    return new CacheEntryImpl<>((K)l.get(0), (V)l.get(1));
                }

                @Override public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };

    // No metadata for SQL queries.
    return new QueryCursorImpl<Cache.Entry<K, V>>(converted) {
        @Override public void close() {
            res.close();
        }
    };
}
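From the caller's side, this path is reached through the public cache API: the indexing SPI rewrites the SqlQuery into a two-column fields query and wraps each row back into a Cache.Entry via CacheEntryImpl. A brief caller-side sketch, assuming a cache named "personCache" with Person configured as a query entity (both names are illustrative):

IgniteCache<Long, Person> cache = ignite.cache("personCache");

// The query text is an assumption; any indexed field of Person would do.
SqlQuery<Long, Person> qry = new SqlQuery<>(Person.class, "salary > ?");
qry.setArgs(50000);

try (QueryCursor<Cache.Entry<Long, Person>> cur = cache.query(qry)) {
    for (Cache.Entry<Long, Person> e : cur)
        System.out.println(e.getKey() + " -> " + e.getValue());
}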
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project Ignite by Apache.
The class GridAbstractCacheStoreSelfTest, method testSimpleMultithreading.
/**
* @throws Exception If failed.
*/
public void testSimpleMultithreading() throws Exception {
    final Random rnd = new Random();

    final LinkedBlockingQueue<UUID> queue = new LinkedBlockingQueue<>();

    multithreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            for (int i = 0; i < 1000; i++) {
                Transaction tx = rnd.nextBoolean() ? new DummyTx() : null;

                ses.newSession(tx);

                int op = rnd.nextInt(10);

                boolean queueEmpty = false;

                if (op < 4) {
                    // Load.
                    UUID key = queue.poll();

                    if (key == null)
                        queueEmpty = true;
                    else {
                        if (rnd.nextBoolean())
                            assertNotNull(store.load(key));
                        else {
                            Map<Object, Object> loaded = store.loadAll(Collections.singleton(key));

                            assertEquals(1, loaded.size());

                            Map.Entry<Object, Object> e = loaded.entrySet().iterator().next();

                            UUID k = (UUID)e.getKey();
                            UUID v = (UUID)e.getValue();

                            assertTrue(k.equals(v) || (k.getMostSignificantBits() == v.getLeastSignificantBits()
                                && k.getLeastSignificantBits() == v.getMostSignificantBits()));
                        }

                        if (tx != null)
                            store.sessionEnd(true);

                        queue.add(key);
                    }
                }
                else if (op < 6) {
                    // Remove.
                    UUID key = queue.poll();

                    if (key == null)
                        queueEmpty = true;
                    else {
                        if (rnd.nextBoolean())
                            store.delete(key);
                        else
                            store.deleteAll(Collections.singleton(key));

                        if (tx != null)
                            store.sessionEnd(true);
                    }
                }
                else {
                    // Update.
                    UUID key = queue.poll();

                    if (key == null)
                        queueEmpty = true;
                    else {
                        UUID val = new UUID(key.getLeastSignificantBits(), key.getMostSignificantBits());

                        if (rnd.nextBoolean())
                            store.write(new CacheEntryImpl<>(key, val));
                        else {
                            Collection<Cache.Entry<? extends Object, ? extends Object>> col = new ArrayList<>();

                            col.add(new CacheEntryImpl<>(key, val));

                            store.writeAll(col);
                        }

                        if (tx != null)
                            store.sessionEnd(true);

                        queue.add(key);
                    }
                }

                if (queueEmpty) {
                    // Add.
                    UUID key = UUID.randomUUID();

                    if (rnd.nextBoolean())
                        store.write(new CacheEntryImpl<>(key, key));
                    else {
                        Collection<Cache.Entry<? extends Object, ? extends Object>> col = new ArrayList<>();

                        col.add(new CacheEntryImpl<>(key, key));

                        store.writeAll(col);
                    }

                    if (tx != null)
                        store.sessionEnd(true);

                    queue.add(key);
                }
            }

            return null;
        }
    }, 37);
}
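The invariant the concurrent loop checks on load is that a stored value is either the key itself (initial insert) or the key with its most and least significant 64-bit halves swapped (update). A single-threaded sketch of the same round trip, reusing the store and ses fixtures from the test class:

UUID key = UUID.randomUUID();
UUID swapped = new UUID(key.getLeastSignificantBits(), key.getMostSignificantBits());

ses.newSession(null);

store.write(new CacheEntryImpl<>(key, swapped));

UUID val = (UUID)store.load(key);

// Mirrors the assertTrue(...) condition used in the multithreaded test above.
assertTrue(val.equals(key) || (key.getMostSignificantBits() == val.getLeastSignificantBits()
    && key.getLeastSignificantBits() == val.getMostSignificantBits()));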
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project Ignite by Apache.
The class CacheUtils, method sparseFold.
private static <K, V, A> A sparseFold(String cacheName, IgniteBiFunction<Cache.Entry<K, V>, A, A> folder,
    IgnitePredicate<K> keyFilter, BinaryOperator<A> accumulator, A zeroVal, V defVal, K defKey, long defValCnt,
    boolean isNilpotent) {
    A defRes = zeroVal;

    if (!isNilpotent)
        for (int i = 0; i < defValCnt; i++)
            defRes = folder.apply(new CacheEntryImpl<>(defKey, defVal), defRes);

    Collection<A> totalRes = bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);

        int partsCnt = ignite.affinity(cacheName).partitions();

        // Use affinity in filter for ScanQuery. Otherwise we accept consumer in each node which is wrong.
        Affinity affinity = ignite.affinity(cacheName);
        ClusterNode localNode = ignite.cluster().localNode();

        A a = zeroVal;

        // Iterate over all partitions. Some of them will be stored on that local node.
        for (int part = 0; part < partsCnt; part++) {
            int p = part;

            // Query returns an empty cursor if this partition is not stored on this node.
            for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part,
                (k, v) -> affinity.mapPartitionToNode(p) == localNode && (keyFilter == null || keyFilter.apply(k)))))
                a = folder.apply(entry, a);
        }

        return a;
    });

    totalRes.add(defRes);

    return totalRes.stream().reduce(zeroVal, accumulator);
}
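The essential trick in sparseFold is folding only over entries whose partition is owned by the local node, so each node in the broadcast processes its own data exactly once. A stripped-down sketch of the same pattern with a plain sum as the folder; the cache name and value type are illustrative assumptions:

// Hypothetical: sum the Double values stored in the local partitions of "mlCache".
Ignite ignite = Ignition.localIgnite();
IgniteCache<Integer, Double> cache = ignite.getOrCreateCache("mlCache");

Affinity<Integer> aff = ignite.affinity("mlCache");
ClusterNode locNode = ignite.cluster().localNode();

double sum = 0;

for (int part = 0; part < aff.partitions(); part++) {
    if (!locNode.equals(aff.mapPartitionToNode(part)))
        continue; // partition is primary on another node, skip it

    for (Cache.Entry<Integer, Double> e : cache.query(new ScanQuery<Integer, Double>(part, null)))
        sum += e.getValue();
}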
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project Ignite by Apache.
The class GridCacheWriteBehindStoreAbstractSelfTest, method runPutGetRemoveMultithreaded.
/**
 * Performs multiple put, get and remove operations in several threads on a store. After
 * all threads have finished their operations, returns the total set of keys that should be
 * in the underlying store.
 *
 * @param threadCnt Count of threads that should update keys.
 * @param keysPerThread Count of unique keys assigned to a thread.
 * @return Set of keys that were put into the store in total.
 * @throws Exception If failed.
 */
protected Set<Integer> runPutGetRemoveMultithreaded(int threadCnt, final int keysPerThread) throws Exception {
    final ConcurrentMap<String, Set<Integer>> perThread = new ConcurrentHashMap<>();

    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger cntr = new AtomicInteger();
    final AtomicInteger operations = new AtomicInteger();

    IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
        @SuppressWarnings({"NullableProblems"})
        @Override public void run() {
            // Initialize key set for this thread.
            Set<Integer> set = new HashSet<>();

            Set<Integer> old = perThread.putIfAbsent(Thread.currentThread().getName(), set);

            if (old != null)
                set = old;

            List<Integer> original = new ArrayList<>();

            Random rnd = new Random();

            for (int i = 0; i < keysPerThread; i++)
                original.add(cntr.getAndIncrement());

            try {
                while (running.get()) {
                    int op = rnd.nextInt(3);
                    int idx = rnd.nextInt(keysPerThread);

                    int key = original.get(idx);

                    switch (op) {
                        case 0:
                            store.write(new CacheEntryImpl<>(key, "val" + key));

                            set.add(key);

                            operations.incrementAndGet();

                            break;

                        case 1:
                            store.delete(key);

                            set.remove(key);

                            operations.incrementAndGet();

                            break;

                        case 2:
                        default:
                            store.write(new CacheEntryImpl<>(key, "broken"));

                            String val = store.load(key);

                            assertEquals("Invalid intermediate value: " + val, "broken", val);

                            store.write(new CacheEntryImpl<>(key, "val" + key));

                            set.add(key);

                            // 2 writes and 1 load performed here, so 3 operations are counted.
                            operations.incrementAndGet();
                            operations.incrementAndGet();
                            operations.incrementAndGet();

                            break;
                    }
                }
            }
            catch (Exception e) {
                error("Unexpected exception in put thread", e);

                assert false;
            }
        }
    }, threadCnt, "put");

    U.sleep(10000);

    running.set(false);

    fut.get();

    log().info(">>> " + operations + " operations performed in total");

    Set<Integer> total = new HashSet<>();

    for (Set<Integer> threadVals : perThread.values())
        total.addAll(threadVals);

    return total;
}
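A concrete write-behind test would typically run this helper, let the write-behind buffer drain, and compare the surviving key set against the backing store. In the sketch below, shutdownStore() and the delegate store are assumptions about the surrounding test fixture, and the thread/key counts are arbitrary:

// Run the mixed put/get/remove workload on 10 threads with 100 keys each.
Set<Integer> expKeys = runPutGetRemoveMultithreaded(10, 100);

// Assumed fixture method: flush the write-behind buffer and stop the store.
shutdownStore();

// 'delegate' stands for the assumed underlying test store behind the write-behind wrapper.
assertEquals(expKeys, delegate.getMap().keySet());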