Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
The class CassandraDirectPersistenceTest, method pojoStrategyTest.
/** */
@Test
@SuppressWarnings("unchecked")
public void pojoStrategyTest() {
    CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore store4 = CacheStoreHelper.createCacheStore("persons", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore productStore = CacheStoreHelper.createCacheStore("product", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore orderStore = CacheStoreHelper.createCacheStore("order", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"), CassandraHelper.getAdminDataSrc());

    Collection<CacheEntryImpl<Long, Person>> entries1 = TestsHelper.generateLongsPersonsEntries();
    Collection<CacheEntryImpl<PersonId, Person>> entries2 = TestsHelper.generatePersonIdsPersonsEntries();
    Collection<CacheEntryImpl<PersonId, Person>> entries3 = TestsHelper.generatePersonIdsPersonsEntries();
    Collection<CacheEntryImpl<Long, Product>> productEntries = TestsHelper.generateProductEntries();
    Collection<CacheEntryImpl<Long, ProductOrder>> orderEntries = TestsHelper.generateOrderEntries();

    LOGGER.info("Running POJO strategy write tests");
    LOGGER.info("Running single write operation tests");

    store1.write(entries1.iterator().next());
    store2.write(entries2.iterator().next());
    store3.write(entries3.iterator().next());
    store4.write(entries3.iterator().next());
    productStore.write(productEntries.iterator().next());
    orderStore.write(orderEntries.iterator().next());

    LOGGER.info("Single write operation tests passed");
    LOGGER.info("Running bulk write operation tests");

    store1.writeAll(entries1);
    store2.writeAll(entries2);
    store3.writeAll(entries3);
    store4.writeAll(entries3);
    productStore.writeAll(productEntries);
    orderStore.writeAll(orderEntries);

    LOGGER.info("Bulk write operation tests passed");
    LOGGER.info("POJO strategy write tests passed");

    LOGGER.info("Running POJO strategy read tests");
    LOGGER.info("Running single read operation tests");

    Person person = (Person) store1.load(entries1.iterator().next().getKey());
    if (!entries1.iterator().next().getValue().equalsPrimitiveFields(person))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    person = (Person) store2.load(entries2.iterator().next().getKey());
    if (!entries2.iterator().next().getValue().equalsPrimitiveFields(person))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    person = (Person) store3.load(entries3.iterator().next().getKey());
    if (!entries3.iterator().next().getValue().equals(person))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    person = (Person) store4.load(entries3.iterator().next().getKey());
    if (!entries3.iterator().next().getValue().equals(person))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    Product product = (Product) productStore.load(productEntries.iterator().next().getKey());
    if (!productEntries.iterator().next().getValue().equals(product))
        throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");

    ProductOrder order = (ProductOrder) orderStore.load(orderEntries.iterator().next().getKey());
    if (!orderEntries.iterator().next().getValue().equals(order))
        throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");

    LOGGER.info("Single read operation tests passed");
    LOGGER.info("Running bulk read operation tests");

    Map persons = store1.loadAll(TestsHelper.getKeys(entries1));
    if (!TestsHelper.checkPersonCollectionsEqual(persons, entries1, true))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    persons = store2.loadAll(TestsHelper.getKeys(entries2));
    if (!TestsHelper.checkPersonCollectionsEqual(persons, entries2, true))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    persons = store3.loadAll(TestsHelper.getKeys(entries3));
    if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    persons = store4.loadAll(TestsHelper.getKeys(entries3));
    if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");

    Map products = productStore.loadAll(TestsHelper.getKeys(productEntries));
    if (!TestsHelper.checkProductCollectionsEqual(products, productEntries))
        throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");

    Map orders = orderStore.loadAll(TestsHelper.getKeys(orderEntries));
    if (!TestsHelper.checkOrderCollectionsEqual(orders, orderEntries))
        throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");

    LOGGER.info("Bulk read operation tests passed");
    LOGGER.info("POJO strategy read tests passed");

    LOGGER.info("Running POJO strategy delete tests");

    store1.delete(entries1.iterator().next().getKey());
    store1.deleteAll(TestsHelper.getKeys(entries1));
    store2.delete(entries2.iterator().next().getKey());
    store2.deleteAll(TestsHelper.getKeys(entries2));
    store3.delete(entries3.iterator().next().getKey());
    store3.deleteAll(TestsHelper.getKeys(entries3));
    store4.delete(entries3.iterator().next().getKey());
    store4.deleteAll(TestsHelper.getKeys(entries3));
    productStore.delete(productEntries.iterator().next().getKey());
    productStore.deleteAll(TestsHelper.getKeys(productEntries));
    orderStore.delete(orderEntries.iterator().next().getKey());
    orderStore.deleteAll(TestsHelper.getKeys(orderEntries));

    LOGGER.info("POJO strategy delete tests passed");
}
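The test above drives each CacheStore directly with CacheEntryImpl instances rather than going through an Ignite cache. Below is a minimal sketch of that write/load/delete round trip, assuming a hypothetical in-memory CacheStoreAdapter subclass (InMemoryStore and its map-based "database" are illustrative stand-ins for the Cassandra-backed store, not part of the test suite):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import javax.cache.Cache;

import org.apache.ignite.cache.store.CacheStoreAdapter;
import org.apache.ignite.internal.processors.cache.CacheEntryImpl;

/** Hypothetical in-memory stand-in for the Cassandra-backed store. */
public class InMemoryStore extends CacheStoreAdapter<Long, String> {
    /** Plays the role of the Cassandra table. */
    private final ConcurrentMap<Long, String> db = new ConcurrentHashMap<>();

    /** {@inheritDoc} */
    @Override public String load(Long key) {
        return db.get(key);
    }

    /** {@inheritDoc} */
    @Override public void write(Cache.Entry<? extends Long, ? extends String> entry) {
        db.put(entry.getKey(), entry.getValue());
    }

    /** {@inheritDoc} */
    @Override public void delete(Object key) {
        db.remove(key);
    }

    public static void main(String[] args) {
        InMemoryStore store = new InMemoryStore();

        // CacheEntryImpl is just a key/value carrier implementing Cache.Entry.
        Cache.Entry<Long, String> entry = new CacheEntryImpl<>(1L, "John Doe");

        store.write(entry);                       // single write, as in the test
        assert "John Doe".equals(store.load(1L)); // single read back
        store.delete(1L);                         // single delete
        assert store.load(1L) == null;            // entry is gone
    }
}

CacheStoreAdapter supplies loadAll/writeAll/deleteAll on top of the three abstract methods, which is why the test can exercise both the single and bulk paths against the same store.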
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
The class CassandraDirectPersistenceTest, method primitiveStrategyTest.
/** */
@Test
@SuppressWarnings("unchecked")
public void primitiveStrategyTest() {
    CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes", new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore store2 = CacheStoreHelper.createCacheStore("stringTypes", new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml"), CassandraHelper.getAdminDataSrc());

    Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
    Collection<CacheEntryImpl<String, String>> strEntries = TestsHelper.generateStringsEntries();

    Collection<Long> fakeLongKeys = TestsHelper.getKeys(longEntries);
    fakeLongKeys.add(-1L);
    fakeLongKeys.add(-2L);
    fakeLongKeys.add(-3L);
    fakeLongKeys.add(-4L);

    Collection<String> fakeStrKeys = TestsHelper.getKeys(strEntries);
    fakeStrKeys.add("-1");
    fakeStrKeys.add("-2");
    fakeStrKeys.add("-3");
    fakeStrKeys.add("-4");

    LOGGER.info("Running PRIMITIVE strategy write tests");
    LOGGER.info("Running single write operation tests");

    store1.write(longEntries.iterator().next());
    store2.write(strEntries.iterator().next());

    LOGGER.info("Single write operation tests passed");
    LOGGER.info("Running bulk write operation tests");

    store1.writeAll(longEntries);
    store2.writeAll(strEntries);

    LOGGER.info("Bulk write operation tests passed");
    LOGGER.info("PRIMITIVE strategy write tests passed");

    LOGGER.info("Running PRIMITIVE strategy read tests");
    LOGGER.info("Running single read operation tests");
    LOGGER.info("Running real keys read tests");

    Long longVal = (Long) store1.load(longEntries.iterator().next().getKey());
    if (!longEntries.iterator().next().getValue().equals(longVal))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");

    String strVal = (String) store2.load(strEntries.iterator().next().getKey());
    if (!strEntries.iterator().next().getValue().equals(strVal))
        throw new RuntimeException("String values were incorrectly deserialized from Cassandra");

    LOGGER.info("Running fake keys read tests");

    longVal = (Long) store1.load(-1L);
    if (longVal != null)
        throw new RuntimeException("Long value with fake key '-1' was found in Cassandra");

    strVal = (String) store2.load("-1");
    if (strVal != null)
        throw new RuntimeException("String value with fake key '-1' was found in Cassandra");

    LOGGER.info("Single read operation tests passed");
    LOGGER.info("Running bulk read operation tests");
    LOGGER.info("Running real keys read tests");

    Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
    if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");

    Map strValues = store2.loadAll(TestsHelper.getKeys(strEntries));
    if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
        throw new RuntimeException("String values were incorrectly deserialized from Cassandra");

    LOGGER.info("Running fake keys read tests");

    longValues = store1.loadAll(fakeLongKeys);
    if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");

    strValues = store2.loadAll(fakeStrKeys);
    if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
        throw new RuntimeException("String values were incorrectly deserialized from Cassandra");

    LOGGER.info("Bulk read operation tests passed");
    LOGGER.info("PRIMITIVE strategy read tests passed");

    LOGGER.info("Running PRIMITIVE strategy delete tests");
    LOGGER.info("Deleting real keys");

    store1.delete(longEntries.iterator().next().getKey());
    store1.deleteAll(TestsHelper.getKeys(longEntries));
    store2.delete(strEntries.iterator().next().getKey());
    store2.deleteAll(TestsHelper.getKeys(strEntries));

    LOGGER.info("Deleting fake keys");

    store1.delete(-1L);
    store2.delete("-1");
    store1.deleteAll(fakeLongKeys);
    store2.deleteAll(fakeStrKeys);

    LOGGER.info("PRIMITIVE strategy delete tests passed");
}
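The fake-key checks rely on a simple contract: load returns null and loadAll silently omits keys that have no row in the backing store, so mixing fake keys into the key set must not change the result. A short sketch of that contract, reusing the hypothetical InMemoryStore from the previous sketch:

import java.util.Arrays;
import java.util.Map;

import org.apache.ignite.internal.processors.cache.CacheEntryImpl;

public class FakeKeysSketch {
    public static void main(String[] args) {
        InMemoryStore store = new InMemoryStore(); // hypothetical store from the sketch above

        store.write(new CacheEntryImpl<>(1L, "one"));
        store.write(new CacheEntryImpl<>(2L, "two"));

        // Mix real keys with fake ones, as the test does with -1L..-4L.
        Map<Long, String> loaded = store.loadAll(Arrays.asList(1L, 2L, -1L, -2L));

        // Only the real keys come back; fake keys are simply absent from the map.
        assert loaded.size() == 2 && !loaded.containsKey(-1L);

        // Single-key reads of fake keys return null rather than throwing.
        assert store.load(-1L) == null;
    }
}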
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
The class GridCacheQueryManager, method runQuery.
/**
 * Processes cache query request.
 *
 * @param qryInfo Query info.
 */
@SuppressWarnings("unchecked")
protected void runQuery(GridCacheQueryInfo qryInfo) {
    assert qryInfo != null;
    assert qryInfo.query().type() != SCAN || !qryInfo.local() : qryInfo;

    if (!enterBusy()) {
        if (cctx.localNodeId().equals(qryInfo.senderId()))
            throw new IllegalStateException("Failed to process query request (grid is stopping).");

        // Ignore remote requests when node is stopping.
        return;
    }

    try {
        boolean loc = qryInfo.local();

        QueryResult<K, V> res = null;

        if (log.isDebugEnabled())
            log.debug("Running query: " + qryInfo);

        boolean rmvIter = true;

        try {
            // Preparing query closures.
            IgniteClosure<Cache.Entry<K, V>, Object> trans = (IgniteClosure<Cache.Entry<K, V>, Object>) qryInfo.transformer();
            IgniteReducer<Cache.Entry<K, V>, Object> rdc = (IgniteReducer<Cache.Entry<K, V>, Object>) qryInfo.reducer();

            injectResources(trans);
            injectResources(rdc);

            GridCacheQueryAdapter<?> qry = qryInfo.query();

            int pageSize = qry.pageSize();

            boolean incBackups = qry.includeBackups();

            String taskName = cctx.kernalContext().task().resolveTaskName(qry.taskHash());

            IgniteSpiCloseableIterator<IgniteBiTuple<K, V>> iter;
            GridCacheQueryType type;

            res = loc ? executeQuery(qry, qryInfo.arguments(), loc, qry.subjectId(), taskName, recipient(qryInfo.senderId(), qryInfo.requestId())) : queryResult(qryInfo, taskName);

            if (res == null)
                return;

            iter = res.iterator(recipient(qryInfo.senderId(), qryInfo.requestId()));
            type = res.type();

            final GridCacheAdapter<K, V> cache = cctx.cache();

            if (log.isDebugEnabled())
                log.debug("Received index iterator [iterHasNext=" + iter.hasNext() + ", cacheSize=" + cache.size() + ']');

            int cnt = 0;

            boolean stop = false;
            boolean pageSent = false;

            Collection<Object> data = new ArrayList<>(pageSize);

            AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

            final boolean statsEnabled = cctx.config().isStatisticsEnabled();

            final boolean readEvt = cctx.gridEvents().isRecordable(EVT_CACHE_QUERY_OBJECT_READ);

            while (!Thread.currentThread().isInterrupted() && iter.hasNext()) {
                long start = statsEnabled ? System.nanoTime() : 0L;

                IgniteBiTuple<K, V> row = iter.next();

                // Query is cancelled.
                if (row == null) {
                    onPageReady(loc, qryInfo, null, true, null);

                    break;
                }

                final K key = row.getKey();

                // Other types are filtered in indexing manager.
                if (!cctx.isReplicated() && qry.type() == SCAN && qry.partition() == null && cctx.config().getCacheMode() != LOCAL && !incBackups && !cctx.affinity().primaryByKey(cctx.localNode(), key, topVer)) {
                    if (log.isDebugEnabled())
                        log.debug("Ignoring backup element [row=" + row + ", cacheMode=" + cctx.config().getCacheMode() + ", incBackups=" + incBackups + ", primary=" + cctx.affinity().primaryByKey(cctx.localNode(), key, topVer) + ']');

                    continue;
                }

                V val = row.getValue();

                if (log.isDebugEnabled()) {
                    ClusterNode primaryNode = cctx.affinity().primaryByKey(key, cctx.affinity().affinityTopologyVersion());

                    log.debug(S.toString("Record", "key", key, true, "val", val, true, "incBackups", incBackups, false, "priNode", primaryNode != null ? U.id8(primaryNode.id()) : null, false, "node", U.id8(cctx.localNode().id()), false));
                }

                if (val == null) {
                    if (log.isDebugEnabled())
                        log.debug(S.toString("Unsuitable record value", "val", val, true));

                    continue;
                }

                if (statsEnabled) {
                    CacheMetricsImpl metrics = cctx.cache().metrics0();

                    metrics.onRead(true);

                    metrics.addGetTimeNanos(System.nanoTime() - start);
                }

                K key0 = null;
                V val0 = null;

                if (readEvt) {
                    key0 = (K) cctx.unwrapBinaryIfNeeded(key, qry.keepBinary());
                    val0 = (V) cctx.unwrapBinaryIfNeeded(val, qry.keepBinary());

                    switch (type) {
                        case SQL:
                            cctx.gridEvents().record(new CacheQueryReadEvent<>(cctx.localNode(), "SQL query entry read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.SQL.name(), cctx.name(), qry.queryClassName(), qry.clause(), null, null, qryInfo.arguments(), qry.subjectId(), taskName, key0, val0, null, null));

                            break;

                        case TEXT:
                            cctx.gridEvents().record(new CacheQueryReadEvent<>(cctx.localNode(), "Full text query entry read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.FULL_TEXT.name(), cctx.name(), qry.queryClassName(), qry.clause(), null, null, null, qry.subjectId(), taskName, key0, val0, null, null));

                            break;

                        case SCAN:
                            cctx.gridEvents().record(new CacheQueryReadEvent<>(cctx.localNode(), "Scan query entry read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.SCAN.name(), cctx.name(), null, null, qry.scanFilter(), null, null, qry.subjectId(), taskName, key0, val0, null, null));

                            break;
                    }
                }

                if (rdc != null || trans != null) {
                    if (key0 == null)
                        key0 = (K) cctx.unwrapBinaryIfNeeded(key, qry.keepBinary());
                    if (val0 == null)
                        val0 = (V) cctx.unwrapBinaryIfNeeded(val, qry.keepBinary());

                    Cache.Entry<K, V> entry = new CacheEntryImpl(key0, val0);

                    // Reduce.
                    if (rdc != null) {
                        if (!rdc.collect(entry) || !iter.hasNext()) {
                            onPageReady(loc, qryInfo, Collections.singletonList(rdc.reduce()), true, null);

                            pageSent = true;

                            break;
                        }
                        else
                            continue;
                    }

                    data.add(trans != null ? trans.apply(entry) : !loc ? new GridCacheQueryResponseEntry<>(key, val) : F.t(key, val));
                }
                else
                    data.add(!loc ? new GridCacheQueryResponseEntry<>(key, val) : F.t(key, val));

                if (!loc) {
                    if (++cnt == pageSize || !iter.hasNext()) {
                        boolean finished = !iter.hasNext();

                        onPageReady(loc, qryInfo, data, finished, null);

                        pageSent = true;

                        if (!finished)
                            rmvIter = false;

                        if (!qryInfo.allPages())
                            return;

                        data = new ArrayList<>(pageSize);

                        if (stop)
                            break; // while
                    }
                }
            }

            if (!pageSent) {
                if (rdc == null)
                    onPageReady(loc, qryInfo, data, true, null);
                else
                    onPageReady(loc, qryInfo, Collections.singletonList(rdc.reduce()), true, null);
            }
        }
        catch (Throwable e) {
            if (!X.hasCause(e, GridDhtUnreservedPartitionException.class))
                U.error(log, "Failed to run query [qry=" + qryInfo + ", node=" + cctx.nodeId() + "]", e);

            onPageReady(loc, qryInfo, null, true, e);

            if (e instanceof Error)
                throw (Error) e;
        }
        finally {
            if (loc) {
                // Local iterators are always removed.
                if (res != null) {
                    try {
                        res.closeIfNotShared(recipient(qryInfo.senderId(), qryInfo.requestId()));
                    }
                    catch (IgniteCheckedException e) {
                        if (!X.hasCause(e, GridDhtUnreservedPartitionException.class))
                            U.error(log, "Failed to close local iterator [qry=" + qryInfo + ", node=" + cctx.nodeId() + "]", e);
                    }
                }
            }
            else if (rmvIter)
                removeQueryResult(qryInfo.senderId(), qryInfo.requestId());
        }
    }
    finally {
        leaveBusy();
    }
}
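Note that runQuery only materializes a CacheEntryImpl when a transformer or reducer is attached, because both consume Cache.Entry rather than the raw IgniteBiTuple from the iterator. Below is a minimal sketch of that hand-off, with a hypothetical length-extracting transformer and entry-counting reducer standing in for whatever closures the query actually registers:

import javax.cache.Cache;

import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
import org.apache.ignite.lang.IgniteClosure;
import org.apache.ignite.lang.IgniteReducer;

public class EntryHandOffSketch {
    public static void main(String[] args) {
        // Hypothetical transformer: map each entry to its value's length.
        IgniteClosure<Cache.Entry<Integer, String>, Object> trans = e -> e.getValue().length();

        // Hypothetical reducer: count every entry it collects.
        IgniteReducer<Cache.Entry<Integer, String>, Integer> rdc =
            new IgniteReducer<Cache.Entry<Integer, String>, Integer>() {
                private int cnt;

                @Override public boolean collect(Cache.Entry<Integer, String> e) {
                    cnt++;

                    return true; // true: keep feeding entries
                }

                @Override public Integer reduce() {
                    return cnt;
                }
            };

        // For each surviving row, the query loop wraps the unwrapped key/value like this:
        Cache.Entry<Integer, String> entry = new CacheEntryImpl<>(1, "one");

        Object transformed = trans.apply(entry); // what lands in the page data
        rdc.collect(entry);                      // what ultimately feeds rdc.reduce()

        System.out.println("transformed=" + transformed + ", reduced=" + rdc.reduce());
    }
}

This also explains the early-exit branch in the loop: once a reducer returns false from collect (or the iterator is exhausted), a single reduced value is sent as the whole page instead of the accumulated entries.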
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
The class IgnitePersistentStoreTest, method loadCacheTest.
/** */
@Test
public void loadCacheTest() {
    Ignition.stopAll(true);

    LOGGER.info("Running loadCache test");
    LOGGER.info("Filling Cassandra table with test data");

    CacheStore store = CacheStoreHelper.createCacheStore("personTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"), CassandraHelper.getAdminDataSrc());

    Collection<CacheEntryImpl<PersonId, Person>> entries = TestsHelper.generatePersonIdsPersonsEntries();

    //noinspection unchecked
    store.writeAll(entries);

    LOGGER.info("Cassandra table filled with test data");
    LOGGER.info("Running loadCache test");

    try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
        CacheConfiguration<PersonId, Person> ccfg = new CacheConfiguration<>("cache3");

        IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(ccfg);

        int size = personCache3.size(CachePeekMode.ALL);

        LOGGER.info("Initial cache size " + size);
        LOGGER.info("Loading cache data from Cassandra table");

        String qry = "select * from test1.pojo_test3 limit 3";

        personCache3.loadCache(null, qry);

        size = personCache3.size(CachePeekMode.ALL);
        Assert.assertEquals("Cache data was incorrectly loaded from Cassandra table by '" + qry + "'", 3, size);

        personCache3.clear();

        personCache3.loadCache(null, new SimpleStatement(qry));

        size = personCache3.size(CachePeekMode.ALL);
        Assert.assertEquals("Cache data was incorrectly loaded from Cassandra table by statement", 3, size);

        personCache3.clear();

        personCache3.loadCache(null);

        size = personCache3.size(CachePeekMode.ALL);
        Assert.assertEquals("Cache data was incorrectly loaded from Cassandra. " + "Expected number of records is " + TestsHelper.getBulkOperationSize() + ", but loaded number of records is " + size, TestsHelper.getBulkOperationSize(), size);

        LOGGER.info("Cache data loaded from Cassandra table");
    }

    LOGGER.info("loadCache test passed");
}
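All three loadCache calls funnel their optional arguments down to CacheStore.loadCache, which pushes matching rows back into the cache through a closure. A sketch of that flow under a simplifying assumption: the hypothetical QueryableStore below treats its first argument as a row limit, standing in for the CQL "limit 3" query the Cassandra store actually executes.

import java.util.HashMap;
import java.util.Map;

import javax.cache.Cache;

import org.apache.ignite.cache.store.CacheStoreAdapter;
import org.apache.ignite.lang.IgniteBiInClosure;

/** Hypothetical store: loadCache interprets args[0] as a row limit. */
public class QueryableStore extends CacheStoreAdapter<Long, String> {
    /** Plays the role of the Cassandra table test1.pojo_test3. */
    private final Map<Long, String> table = new HashMap<>();

    {
        for (long i = 1; i <= 10; i++)
            table.put(i, "row-" + i);
    }

    /** {@inheritDoc} */
    @Override public void loadCache(IgniteBiInClosure<Long, String> clo, Object... args) {
        int limit = args.length > 0 ? (Integer) args[0] : Integer.MAX_VALUE;

        int cnt = 0;

        for (Map.Entry<Long, String> row : table.entrySet()) {
            if (cnt++ == limit)
                break;

            clo.apply(row.getKey(), row.getValue()); // push the row into the cache
        }
    }

    /** {@inheritDoc} */
    @Override public String load(Long key) {
        return table.get(key);
    }

    /** {@inheritDoc} */
    @Override public void write(Cache.Entry<? extends Long, ? extends String> entry) {
        table.put(entry.getKey(), entry.getValue());
    }

    /** {@inheritDoc} */
    @Override public void delete(Object key) {
        table.remove(key);
    }

    public static void main(String[] args) {
        Map<Long, String> cache = new HashMap<>();

        new QueryableStore().loadCache(cache::put, 3);

        assert cache.size() == 3; // mirrors the 'limit 3' assertion in the test
    }
}

Calling loadCache with no arguments then corresponds to the final branch of the test, where every row in the table is loaded.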
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
The class CassandraDirectPersistenceTest, method pojoStrategyTransactionTest.
/** */
@Test
@SuppressWarnings("unchecked")
public void pojoStrategyTransactionTest() {
    Map<Object, Object> sessionProps = U.newHashMap(1);
    Transaction sessionTx = new TestTransaction();

    CacheStore productStore = CacheStoreHelper.createCacheStore("product", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"), CassandraHelper.getAdminDataSrc(), new TestCacheSession("product", sessionTx, sessionProps));
    CacheStore orderStore = CacheStoreHelper.createCacheStore("order", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"), CassandraHelper.getAdminDataSrc(), new TestCacheSession("order", sessionTx, sessionProps));

    List<CacheEntryImpl<Long, Product>> productEntries = TestsHelper.generateProductEntries();
    Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> ordersPerProduct = TestsHelper.generateOrdersPerProductEntries(productEntries, 2);

    Collection<Long> productIds = TestsHelper.getProductIds(productEntries);
    Collection<Long> orderIds = TestsHelper.getOrderIds(ordersPerProduct);

    LOGGER.info("Running POJO strategy transaction write tests");
    LOGGER.info("Running single write operation tests");

    CassandraHelper.dropTestKeyspaces();

    Product product = productEntries.iterator().next().getValue();
    ProductOrder order = ordersPerProduct.get(product.getId()).iterator().next().getValue();

    productStore.write(productEntries.iterator().next());
    orderStore.write(ordersPerProduct.get(product.getId()).iterator().next());

    if (productStore.load(product.getId()) != null || orderStore.load(order.getId()) != null) {
        throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " + "objects were already persisted into Cassandra");
    }

    Map<Long, Product> products = (Map<Long, Product>) productStore.loadAll(productIds);
    Map<Long, ProductOrder> orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
        throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " + "objects were already persisted into Cassandra");
    }

    //noinspection deprecation
    orderStore.sessionEnd(true);
    //noinspection deprecation
    productStore.sessionEnd(true);

    Product product1 = (Product) productStore.load(product.getId());
    ProductOrder order1 = (ProductOrder) orderStore.load(order.getId());

    if (product1 == null || order1 == null) {
        throw new RuntimeException("Single write operation test failed. Transaction was committed, but " + "no objects were persisted into Cassandra");
    }

    if (!product.equals(product1) || !order.equals(order1)) {
        throw new RuntimeException("Single write operation test failed. Transaction was committed, but " + "objects were incorrectly persisted/loaded to/from Cassandra");
    }

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
        throw new RuntimeException("Single write operation test failed. Transaction was committed, but " + "no objects were persisted into Cassandra");
    }

    if (products.size() > 1 || orders.size() > 1) {
        throw new RuntimeException("Single write operation test failed. There were committed more objects " + "into Cassandra than expected");
    }

    product1 = products.entrySet().iterator().next().getValue();
    order1 = orders.entrySet().iterator().next().getValue();

    if (!product.equals(product1) || !order.equals(order1)) {
        throw new RuntimeException("Single write operation test failed. Transaction was committed, but " + "objects were incorrectly persisted/loaded to/from Cassandra");
    }

    LOGGER.info("Single write operation tests passed");
    LOGGER.info("Running bulk write operation tests");

    CassandraHelper.dropTestKeyspaces();
    sessionProps.clear();

    productStore.writeAll(productEntries);

    for (Long productId : ordersPerProduct.keySet())
        orderStore.writeAll(ordersPerProduct.get(productId));

    for (Long productId : productIds) {
        if (productStore.load(productId) != null) {
            throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " + "objects were already persisted into Cassandra");
        }
    }

    for (Long orderId : orderIds) {
        if (orderStore.load(orderId) != null) {
            throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " + "objects were already persisted into Cassandra");
        }
    }

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
        throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " + "objects were already persisted into Cassandra");
    }

    //noinspection deprecation
    productStore.sessionEnd(true);
    //noinspection deprecation
    orderStore.sessionEnd(true);

    for (CacheEntryImpl<Long, Product> entry : productEntries) {
        product = (Product) productStore.load(entry.getKey());

        if (!entry.getValue().equals(product)) {
            throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " + "not all objects were persisted into Cassandra");
        }
    }

    for (Long productId : ordersPerProduct.keySet()) {
        for (CacheEntryImpl<Long, ProductOrder> entry : ordersPerProduct.get(productId)) {
            order = (ProductOrder) orderStore.load(entry.getKey());

            if (!entry.getValue().equals(order)) {
                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " + "not all objects were persisted into Cassandra");
            }
        }
    }

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
        throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " + "no objects were persisted into Cassandra");
    }

    if (products.size() < productIds.size() || orders.size() < orderIds.size()) {
        throw new RuntimeException("Bulk write operation test failed. There were committed less objects " + "into Cassandra than expected");
    }

    if (products.size() > productIds.size() || orders.size() > orderIds.size()) {
        throw new RuntimeException("Bulk write operation test failed. There were committed more objects " + "into Cassandra than expected");
    }

    for (CacheEntryImpl<Long, Product> entry : productEntries) {
        product = products.get(entry.getKey());

        if (!entry.getValue().equals(product)) {
            throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " + "some objects were incorrectly persisted/loaded to/from Cassandra");
        }
    }

    for (Long productId : ordersPerProduct.keySet()) {
        for (CacheEntryImpl<Long, ProductOrder> entry : ordersPerProduct.get(productId)) {
            order = orders.get(entry.getKey());

            if (!entry.getValue().equals(order)) {
                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " + "some objects were incorrectly persisted/loaded to/from Cassandra");
            }
        }
    }

    LOGGER.info("Bulk write operation tests passed");
    LOGGER.info("POJO strategy transaction write tests passed");

    LOGGER.info("Running POJO strategy transaction delete tests");
    LOGGER.info("Running single delete tests");

    sessionProps.clear();

    Product deletedProduct = productEntries.remove(0).getValue();
    ProductOrder deletedOrder = ordersPerProduct.get(deletedProduct.getId()).remove(0).getValue();

    productStore.delete(deletedProduct.getId());
    orderStore.delete(deletedOrder.getId());

    if (productStore.load(deletedProduct.getId()) == null || orderStore.load(deletedOrder.getId()) == null) {
        throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " + "objects were already deleted from Cassandra");
    }

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if (products.size() != productIds.size() || orders.size() != orderIds.size()) {
        throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " + "objects were already deleted from Cassandra");
    }

    //noinspection deprecation
    productStore.sessionEnd(true);
    //noinspection deprecation
    orderStore.sessionEnd(true);

    if (productStore.load(deletedProduct.getId()) != null || orderStore.load(deletedOrder.getId()) != null) {
        throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " + "objects were not deleted from Cassandra");
    }

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if (products.get(deletedProduct.getId()) != null || orders.get(deletedOrder.getId()) != null) {
        throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " + "objects were not deleted from Cassandra");
    }

    LOGGER.info("Single delete tests passed");
    LOGGER.info("Running bulk delete tests");

    sessionProps.clear();

    productStore.deleteAll(productIds);
    orderStore.deleteAll(orderIds);

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
        throw new RuntimeException("Bulk delete operation test failed. Transaction wasn't committed yet, but " + "objects were already deleted from Cassandra");
    }

    //noinspection deprecation
    orderStore.sessionEnd(true);
    //noinspection deprecation
    productStore.sessionEnd(true);

    products = (Map<Long, Product>) productStore.loadAll(productIds);
    orders = (Map<Long, ProductOrder>) orderStore.loadAll(orderIds);

    if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
        throw new RuntimeException("Bulk delete operation test failed. Transaction was committed, but " + "objects were not deleted from Cassandra");
    }

    LOGGER.info("Bulk delete tests passed");
    LOGGER.info("POJO strategy transaction delete tests passed");
}
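The transaction checks above hinge on one behavior: while a session is open, writes and deletes accumulate in a per-session buffer and only reach Cassandra when sessionEnd(true) flushes them. Below is a minimal sketch of that semantics with a hypothetical buffering store (the real Cassandra store coordinates batched mutations through the shared session-properties map rather than a plain field, so this is an illustration of the contract, not the implementation):

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

import javax.cache.Cache;

import org.apache.ignite.cache.store.CacheStoreAdapter;
import org.apache.ignite.internal.processors.cache.CacheEntryImpl;

/** Hypothetical store: mutations are buffered until sessionEnd(true). */
public class TxBufferedStore extends CacheStoreAdapter<Long, String> {
    /** Plays the role of Cassandra. */
    private final Map<Long, String> db = new HashMap<>();

    /** Per-session buffer; a null value marks a pending delete. */
    private final Map<Long, String> pending = new LinkedHashMap<>();

    /** {@inheritDoc} */
    @Override public String load(Long key) {
        return db.get(key); // reads see only committed state
    }

    /** {@inheritDoc} */
    @Override public void write(Cache.Entry<? extends Long, ? extends String> entry) {
        pending.put(entry.getKey(), entry.getValue());
    }

    /** {@inheritDoc} */
    @Override public void delete(Object key) {
        pending.put((Long) key, null);
    }

    /** {@inheritDoc} */
    @Override public void sessionEnd(boolean commit) {
        if (commit) {
            for (Map.Entry<Long, String> mut : pending.entrySet()) {
                if (mut.getValue() == null)
                    db.remove(mut.getKey());
                else
                    db.put(mut.getKey(), mut.getValue());
            }
        }

        pending.clear(); // rollback simply drops the buffer
    }

    public static void main(String[] args) {
        TxBufferedStore store = new TxBufferedStore();

        store.write(new CacheEntryImpl<>(1L, "product"));
        assert store.load(1L) == null;            // not committed yet, invisible

        store.sessionEnd(true);
        assert "product".equals(store.load(1L));  // visible after commit

        store.delete(1L);
        assert store.load(1L) != null;            // delete is buffered too

        store.sessionEnd(true);
        assert store.load(1L) == null;            // gone after commit
    }
}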