Use of org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL in project ignite by apache.
The class CacheMvccAbstractTest, method readAllByMode.
/**
 * Reads values from the cache for the given keys using the given read mode.
 *
 * @param cache Cache.
 * @param keys Keys.
 * @param readMode Read mode.
 * @param codec Value codec (used by the SQL read modes).
 * @return Map of read values keyed by cache key.
 */
@SuppressWarnings("unchecked")
protected Map readAllByMode(IgniteCache cache, Set keys, ReadMode readMode, ObjectCodec codec) {
    assert cache != null && keys != null && readMode != null;
    assert readMode != SQL || codec != null;

    boolean emulateLongQry = ThreadLocalRandom.current().nextBoolean();

    switch (readMode) {
        case GET:
            return cache.getAll(keys);

        case SCAN:
            ScanQuery scanQry = new ScanQuery(new IgniteBiPredicate() {
                @Override public boolean apply(Object k, Object v) {
                    if (emulateLongQry)
                        doSleep(ThreadLocalRandom.current().nextInt(50));

                    return keys.contains(k);
                }
            });

            Map res;

            try (QueryCursor qry = cache.query(scanQry)) {
                res = (Map)qry.getAll().stream()
                    .collect(Collectors.toMap(v -> ((IgniteBiTuple)v).getKey(), v -> ((IgniteBiTuple)v).getValue()));

                assertTrue("res.size()=" + res.size() + ", keys.size()=" + keys.size(), res.size() <= keys.size());
            }

            return res;

        case SQL:
            StringBuilder b = new StringBuilder("SELECT " + codec.columnsNames() + " FROM " + codec.tableName() + " WHERE _key IN (");

            boolean first = true;

            for (Object key : keys) {
                if (first)
                    first = false;
                else
                    b.append(", ");

                b.append(key);
            }

            b.append(')');

            String qry = b.toString();

            SqlFieldsQuery sqlFieldsQry = new SqlFieldsQuery(qry);

            if (emulateLongQry)
                sqlFieldsQry.setLazy(true).setPageSize(1);

            List<List> rows;

            try (FieldsQueryCursor<List> cur = cache.query(sqlFieldsQry)) {
                if (emulateLongQry) {
                    rows = new ArrayList<>();

                    for (List row : cur) {
                        rows.add(row);

                        doSleep(ThreadLocalRandom.current().nextInt(50));
                    }
                }
                else
                    rows = cur.getAll();
            }

            if (rows.isEmpty())
                return Collections.emptyMap();

            res = new HashMap();

            for (List row : rows)
                res.put(row.get(0), codec.decode(row));

            return res;

        case SQL_SUM:
            b = new StringBuilder("SELECT SUM(" + codec.aggregateColumnName() + ") FROM " + codec.tableName() + " WHERE _key IN (");

            first = true;

            for (Object key : keys) {
                if (first)
                    first = false;
                else
                    b.append(", ");

                b.append(key);
            }

            b.append(')');

            qry = b.toString();

            FieldsQueryCursor<List> cur = cache.query(new SqlFieldsQuery(qry));

            rows = cur.getAll();

            if (rows.isEmpty())
                return Collections.emptyMap();

            res = new HashMap();

            for (List row : rows)
                res.put(row.get(0), row.get(0));

            return res;

        default:
            throw new AssertionError("Unsupported read mode: " + readMode);
    }
}
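The SQL branch above boils down to a single SELECT over the key and value columns, filtered by an IN list of keys, with the result rows folded back into a map. Below is a minimal standalone sketch of that read path; the local node startup, the cache name "sketch", and the indexed Integer/Integer types (which expose a SQL table named Integer in the cache's schema) are illustrative assumptions, not part of the test harness.

import java.util.*;
import java.util.stream.Collectors;
import org.apache.ignite.*;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class SqlReadSketch {
    public static void main(String[] args) {
        // Assumed single-node setup; the test harness starts and manages its own grids instead.
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, Integer>("sketch")
                    .setIndexedTypes(Integer.class, Integer.class)); // Exposes SQL table "Integer".

            for (int i = 0; i < 5; i++)
                cache.put(i, i * 100);

            Set<Integer> keys = new HashSet<>(Arrays.asList(1, 2, 3));

            // Same query shape as the SQL branch above: key plus value columns, filtered by key.
            String qry = "SELECT _key, _val FROM Integer WHERE _key IN (" +
                keys.stream().map(String::valueOf).collect(Collectors.joining(", ")) + ")";

            Map<Integer, Integer> res = new HashMap<>();

            for (List<?> row : cache.query(new SqlFieldsQuery(qry)).getAll())
                res.put((Integer)row.get(0), (Integer)row.get(1));

            System.out.println(res); // {1=100, 2=200, 3=300}
        }
    }
}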
Use of org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL in project ignite by apache.
The class MvccRepeatableReadOperationsTest, method testReplaceConsistency.
/**
 * Check replace/getAndReplace operations consistency.
 *
 * @throws IgniteCheckedException If failed.
 */
@Test
public void testReplaceConsistency() throws IgniteCheckedException {
    Ignite node1 = grid(0);

    TestCache<Integer, MvccTestAccount> cache1 = new TestCache<>(node1.cache(DEFAULT_CACHE_NAME));

    final Set<Integer> existedKeys = new HashSet<>(3);
    final Set<Integer> nonExistedKeys = new HashSet<>(3);

    final Set<Integer> allKeys = generateKeySet(grid(0).cache(DEFAULT_CACHE_NAME), existedKeys, nonExistedKeys);

    final Map<Integer, MvccTestAccount> initialMap = existedKeys.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 1)));

    Map<Integer, MvccTestAccount> updateMap = existedKeys.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 3)));

    cache1.cache.putAll(initialMap);

    IgniteTransactions txs = node1.transactions();

    try (Transaction tx = txs.txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
        for (Integer key : allKeys) {
            MvccTestAccount newVal = new MvccTestAccount(key, 2);

            if (existedKeys.contains(key)) {
                assertTrue(cache1.cache.replace(key, new MvccTestAccount(key, 1), newVal));

                assertEquals(newVal, cache1.cache.getAndReplace(key, new MvccTestAccount(key, 3)));
            }
            else {
                assertFalse(cache1.cache.replace(key, new MvccTestAccount(key, 1), newVal));

                assertNull(cache1.cache.getAndReplace(key, new MvccTestAccount(key, 3)));
            }
        }

        assertEquals(updateMap, getEntries(cache1, allKeys, SQL));
        assertEquals(updateMap, getEntries(cache1, allKeys, GET));

        tx.commit();
    }

    assertEquals(updateMap, getEntries(cache1, allKeys, SQL));
    assertEquals(updateMap, getEntries(cache1, allKeys, GET));
}
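The assertions rely on the conditional-replace contract of IgniteCache: replace(key, oldVal, newVal) succeeds only when the current value equals oldVal, while getAndReplace returns the previous value, or null and performs no write for a missing key. Below is a minimal sketch of that contract; the local node start, the cache name, and the TRANSACTIONAL_SNAPSHOT atomicity mode (the MVCC mode this test family targets) are assumptions for illustration, not taken from the test itself.

import org.apache.ignite.*;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.*;

public class ReplaceContractSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumed MVCC cache configuration (TRANSACTIONAL_SNAPSHOT enabled MVCC in the Ignite 2.7+ era).
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, String>("replaceSketch")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT));

            cache.put(1, "v1");

            // MVCC caches support only PESSIMISTIC REPEATABLE_READ transactions.
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                System.out.println(cache.replace(1, "v1", "v2"));       // true: current value matched.
                System.out.println(cache.getAndReplace(1, "v3"));       // v2: previous value returned.

                System.out.println(cache.replace(2, "v1", "v2"));       // false: key 2 does not exist.
                System.out.println(cache.getAndReplace(2, "v3"));       // null: missing key, nothing written.

                tx.commit();
            }

            System.out.println(cache.get(1)); // v3
            System.out.println(cache.get(2)); // null
        }
    }
}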
Use of org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL in project ignite by apache.
The class MvccRepeatableReadOperationsTest, method testGetAndUpdateOperations.
/**
* Check getAndPut/getAndRemove operations consistency.
*
* @throws IgniteCheckedException If failed.
*/
@Test
public void testGetAndUpdateOperations() throws IgniteCheckedException {
    Ignite node1 = grid(0);

    TestCache<Integer, MvccTestAccount> cache1 = new TestCache<>(node1.cache(DEFAULT_CACHE_NAME));

    final Set<Integer> keysForUpdate = new HashSet<>(3);
    final Set<Integer> keysForRemove = new HashSet<>(3);

    final Set<Integer> allKeys = generateKeySet(grid(0).cache(DEFAULT_CACHE_NAME), keysForUpdate, keysForRemove);

    final Map<Integer, MvccTestAccount> initialMap = keysForRemove.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 1)));

    Map<Integer, MvccTestAccount> updateMap = keysForUpdate.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 3)));

    cache1.cache.putAll(initialMap);

    IgniteTransactions txs = node1.transactions();

    try (Transaction tx = txs.txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
        for (Integer key : keysForUpdate) {
            MvccTestAccount newVal1 = new MvccTestAccount(key, 1);

            // Check create.
            assertNull(cache1.cache.getAndPut(key, newVal1));

            MvccTestAccount newVal2 = new MvccTestAccount(key, 2);

            // Check update.
            assertEquals(newVal1, cache1.cache.getAndPut(key, newVal2));
        }

        for (Integer key : keysForRemove) {
            // Check remove existed.
            assertEquals(initialMap.get(key), cache1.cache.getAndRemove(key));

            // Check remove non-existed.
            assertNull(cache1.cache.getAndRemove(key));
        }

        for (Integer key : allKeys) {
            MvccTestAccount oldVal = new MvccTestAccount(key, 2);
            MvccTestAccount newVal = new MvccTestAccount(key, 3);

            if (keysForRemove.contains(key))
                // Omit update 'null'.
                assertNull(cache1.cache.getAndReplace(key, newVal));
            else
                // Check updated.
                assertEquals(oldVal, cache1.cache.getAndReplace(key, newVal));
        }

        assertEquals(updateMap, getEntries(cache1, allKeys, SQL));
        assertEquals(updateMap, getEntries(cache1, allKeys, GET));

        tx.commit();
    }

    assertEquals(updateMap, getEntries(cache1, allKeys, SQL));
    assertEquals(updateMap, getEntries(cache1, allKeys, GET));
}
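Assertions such as assertEquals(newVal1, cache1.cache.getAndPut(key, newVal2)) compare freshly constructed accounts against instances read back from the cache, so they only hold if MvccTestAccount defines value-based equality. The stand-in class below sketches that property; its field names and constructor are illustrative assumptions, not the actual test class.

import java.util.Objects;

// Illustrative stand-in for MvccTestAccount: comparing a new instance with one
// returned by the cache only works when equality is defined over the field values.
public class AccountSketch {
    final int val;
    final int updateCnt;

    AccountSketch(int val, int updateCnt) {
        this.val = val;
        this.updateCnt = updateCnt;
    }

    @Override public boolean equals(Object o) {
        if (this == o)
            return true;

        if (!(o instanceof AccountSketch))
            return false;

        AccountSketch other = (AccountSketch)o;

        return val == other.val && updateCnt == other.updateCnt;
    }

    @Override public int hashCode() {
        return Objects.hash(val, updateCnt);
    }

    public static void main(String[] args) {
        // Two separately constructed instances compare equal, mirroring the
        // assertEquals calls against cache results in the test above.
        System.out.println(new AccountSketch(1, 2).equals(new AccountSketch(1, 2))); // true
    }
}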
Use of org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL in project ignite by apache.
The class MvccRepeatableReadOperationsTest, method testPutIfAbsentConsistency.
/**
 * Check putIfAbsent operation consistency.
 *
 * @throws IgniteCheckedException If failed.
 */
@Test
public void testPutIfAbsentConsistency() throws IgniteCheckedException {
    Ignite node1 = grid(0);

    TestCache<Integer, MvccTestAccount> cache1 = new TestCache<>(node1.cache(DEFAULT_CACHE_NAME));

    final Set<Integer> keysForCreate = new HashSet<>(3);
    final Set<Integer> keysForUpdate = new HashSet<>(3);

    final Set<Integer> allKeys = generateKeySet(grid(0).cache(DEFAULT_CACHE_NAME), keysForCreate, keysForUpdate);

    final Map<Integer, MvccTestAccount> initialMap = keysForUpdate.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 1)));

    Map<Integer, MvccTestAccount> updatedMap = allKeys.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 1)));

    cache1.cache.putAll(initialMap);

    IgniteTransactions txs = node1.transactions();

    try (Transaction tx = txs.txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
        for (Integer key : keysForUpdate) // Check update.
            assertFalse(cache1.cache.putIfAbsent(key, new MvccTestAccount(key, 2)));

        for (Integer key : keysForCreate) // Check create.
            assertTrue(cache1.cache.putIfAbsent(key, new MvccTestAccount(key, 1)));

        assertEquals(updatedMap, getEntries(cache1, allKeys, SQL));

        tx.commit();
    }

    assertEquals(updatedMap, getEntries(cache1, allKeys, SQL));
    assertEquals(updatedMap, getEntries(cache1, allKeys, GET));
}
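putIfAbsent returns false and leaves the entry untouched when the key already exists, and returns true only when it creates the entry, which is why updatedMap keeps value 1 for the pre-loaded keys. A short sketch of that contract on a plain cache follows; the node start and cache name are illustrative, and the real test runs the calls inside an MVCC transaction rather than with implicit operations.

import org.apache.ignite.*;

public class PutIfAbsentSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("putIfAbsentSketch");

            cache.put(1, "initial");

            System.out.println(cache.putIfAbsent(1, "update")); // false: key 1 already exists.
            System.out.println(cache.putIfAbsent(2, "create")); // true: key 2 is created.

            System.out.println(cache.get(1)); // initial (unchanged)
            System.out.println(cache.get(2)); // create
        }
    }
}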
Use of org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL in project ignite by apache.
The class MvccRepeatableReadBulkOpsTest, method checkOperations.
/**
 * Checks SQL and Cache API operation isolation consistency.
 *
 * @param readModeBefore Read mode used before the value is updated.
 * @param readModeAfter Read mode used after the value is updated.
 * @param writeMode Write mode used for the update.
 * @param readFromClient {@code True} to perform reads from the client node and updates from a server node.
 * @throws Exception If failed.
 */
private void checkOperations(ReadMode readModeBefore, ReadMode readModeAfter, WriteMode writeMode, boolean readFromClient) throws Exception {
    Ignite node1 = grid(readFromClient ? nodesCount() - 1 : 0);
    Ignite node2 = grid(readFromClient ? 0 : nodesCount() - 1);

    TestCache<Integer, MvccTestAccount> cache1 = new TestCache<>(node1.cache(DEFAULT_CACHE_NAME));
    TestCache<Integer, MvccTestAccount> cache2 = new TestCache<>(node2.cache(DEFAULT_CACHE_NAME));

    final Set<Integer> keysForUpdate = new HashSet<>(3);
    final Set<Integer> keysForRemove = new HashSet<>(3);

    final Set<Integer> allKeys = generateKeySet(grid(0).cache(DEFAULT_CACHE_NAME), keysForUpdate, keysForRemove);

    final Map<Integer, MvccTestAccount> initialMap = allKeys.stream()
        .collect(Collectors.toMap(k -> k, k -> new MvccTestAccount(k, 1)));

    final Map<Integer, MvccTestAccount> updateMap = keysForUpdate.stream()
        .collect(Collectors.toMap(Function.identity(), k -> new MvccTestAccount(k, 2))); /* Removed keys are excluded. */

    cache1.cache.putAll(initialMap);

    IgniteTransactions txs1 = node1.transactions();
    IgniteTransactions txs2 = node2.transactions();

    CountDownLatch updateStart = new CountDownLatch(1);
    CountDownLatch updateFinish = new CountDownLatch(1);

    // Start concurrent transactions and check isolation.
    IgniteInternalFuture<Void> updater = GridTestUtils.runAsync(new Callable<Void>() {
        @Override public Void call() throws Exception {
            updateStart.await();

            assertEquals(initialMap.size(), cache2.cache.size());

            try (Transaction tx = txs2.txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                tx.timeout(TX_TIMEOUT);

                updateEntries(cache2, updateMap, writeMode);
                removeEntries(cache2, keysForRemove, writeMode);

                assertEquals(updateMap, cache2.cache.getAll(allKeys));

                tx.commit();
            }
            finally {
                updateFinish.countDown();
            }

            assertEquals(updateMap.size(), cache2.cache.size());

            return null;
        }
    });

    IgniteInternalFuture<Void> reader = GridTestUtils.runAsync(new Callable<Void>() {
        @Override public Void call() throws Exception {
            try (Transaction tx = txs1.txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                assertEquals(initialMap, getEntries(cache1, allKeys, readModeBefore));
                checkContains(cache1, true, allKeys);

                updateStart.countDown();
                updateFinish.await();

                assertEquals(initialMap, getEntries(cache1, allKeys, readModeAfter));
                checkContains(cache1, true, allKeys);

                tx.commit();
            }

            return null;
        }
    });

    try {
        updater.get(3_000, TimeUnit.MILLISECONDS);
        reader.get(3_000, TimeUnit.MILLISECONDS);
    }
    catch (Throwable e) {
        throw new AssertionError(e);
    }
    finally {
        updateStart.countDown();
        updateFinish.countDown();
    }

    assertEquals(updateMap, cache1.cache.getAll(allKeys));
}
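The isolation check hinges on a two-latch handshake: the reader takes its first snapshot, releases updateStart, blocks on updateFinish while the updater commits, then reads again inside the same REPEATABLE_READ transaction and must still observe initialMap. The stripped-down sketch below shows just that handshake with plain JDK executors; all names are illustrative and no Ignite APIs are involved.

import java.util.concurrent.*;

public class LatchHandshakeSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch updateStart = new CountDownLatch(1);
        CountDownLatch updateFinish = new CountDownLatch(1);

        ExecutorService pool = Executors.newFixedThreadPool(2);

        Future<?> updater = pool.submit(() -> {
            updateStart.await();            // Wait until the reader has taken its first snapshot.
            System.out.println("update applied and committed");
            updateFinish.countDown();       // Let the reader take its second snapshot.
            return null;
        });

        Future<?> reader = pool.submit(() -> {
            System.out.println("first read");
            updateStart.countDown();        // Allow the concurrent update to proceed.
            updateFinish.await();           // Wait until the update has committed.
            System.out.println("second read (must match the first under REPEATABLE_READ)");
            return null;
        });

        // Bounded waits mirror the 3-second get(...) calls in the test.
        updater.get(3_000, TimeUnit.MILLISECONDS);
        reader.get(3_000, TimeUnit.MILLISECONDS);

        pool.shutdown();
    }
}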