Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class CacheMvccAbstractTest, method accountsTxReadAll.
/**
* @param srvs Number of server nodes.
* @param clients Number of client nodes.
* @param cacheBackups Number of cache backups.
* @param cacheParts Number of cache partitions.
* @param cfgC Optional closure applied to cache configuration.
* @param withRmvs If {@code true}, the test also executes removes in addition to puts.
* @param readMode Read mode.
* @param writeMode Write mode.
* @param testTime Test time.
* @param restartMode Restart mode.
* @throws Exception If failed.
*/
final void accountsTxReadAll(final int srvs, final int clients, int cacheBackups, int cacheParts, @Nullable IgniteInClosure<CacheConfiguration> cfgC, final boolean withRmvs, final ReadMode readMode, final WriteMode writeMode, long testTime, RestartMode restartMode) throws Exception {
final int ACCOUNTS = 20;
final int ACCOUNT_START_VAL = 1000;
final int writers = 4;
final int readers = 4;
final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
@Override
public void apply(IgniteCache<Object, Object> cache) {
final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
if (writeMode == WriteMode.PUT) {
Map<Integer, MvccTestAccount> accounts = new HashMap<>();
for (int i = 0; i < ACCOUNTS; i++) accounts.put(i, new MvccTestAccount(ACCOUNT_START_VAL, 1));
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
cache.putAll(accounts);
tx.commit();
}
} else if (writeMode == WriteMode.DML) {
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
SqlFieldsQuery qry = new SqlFieldsQuery("insert into MvccTestAccount(_key, val, updateCnt) values " + "(?," + ACCOUNT_START_VAL + ",1)");
for (int i = 0; i < ACCOUNTS; i++) {
try (FieldsQueryCursor<List<?>> cur = cache.query(qry.setArgs(i))) {
assertEquals(1L, cur.iterator().next().get(0));
}
}
// Commit once, after all accounts have been inserted.
tx.commit();
}
} else
assert false : "Unknown write mode";
}
};
final RemovedAccountsTracker rmvdTracker = new RemovedAccountsTracker(ACCOUNTS);
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
int cnt = 0;
while (!stop.get()) {
TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
try {
IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
cnt++;
int i1 = rnd.nextInt(ACCOUNTS), i2 = rnd.nextInt(ACCOUNTS);
while (i2 == i1) i2 = rnd.nextInt(ACCOUNTS);
Integer id1 = Math.min(i1, i2);
Integer id2 = Math.max(i1, i2);
Set<Integer> keys = new HashSet<>();
keys.add(id1);
keys.add(id2);
Integer cntr1 = null;
Integer cntr2 = null;
Integer rmvd = null;
Integer inserted = null;
MvccTestAccount a1;
MvccTestAccount a2;
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
tx.timeout(TX_TIMEOUT);
Map<Integer, MvccTestAccount> accounts = null;
if (writeMode == WriteMode.PUT)
accounts = cache.cache.getAll(keys);
else if (writeMode == WriteMode.DML)
accounts = getAllSql(cache);
else
assert false : "Unknown write mode";
a1 = accounts.get(id1);
a2 = accounts.get(id2);
if (!withRmvs) {
assertNotNull(a1);
assertNotNull(a2);
cntr1 = a1.updateCnt + 1;
cntr2 = a2.updateCnt + 1;
if (writeMode == WriteMode.PUT) {
cache.cache.put(id1, new MvccTestAccount(a1.val + 1, cntr1));
cache.cache.put(id2, new MvccTestAccount(a2.val - 1, cntr2));
} else if (writeMode == WriteMode.DML) {
updateSql(cache, id1, a1.val + 1, cntr1);
updateSql(cache, id2, a2.val - 1, cntr2);
} else
assert false : "Unknown write mode";
} else {
if (a1 != null || a2 != null) {
if (a1 != null && a2 != null) {
if (rnd.nextInt(10) == 0) {
if (rmvdTracker.size() < ACCOUNTS / 2) {
rmvd = rnd.nextBoolean() ? id1 : id2;
assertTrue(rmvdTracker.markRemoved(rmvd));
}
}
if (rmvd != null) {
if (writeMode == WriteMode.PUT) {
if (rmvd.equals(id1)) {
cache.cache.remove(id1);
cache.cache.put(id2, new MvccTestAccount(a1.val + a2.val, 1));
} else {
cache.cache.put(id1, new MvccTestAccount(a1.val + a2.val, 1));
cache.cache.remove(id2);
}
} else if (writeMode == WriteMode.DML) {
if (rmvd.equals(id1)) {
removeSql(cache, id1);
updateSql(cache, id2, a1.val + a2.val, 1);
} else {
updateSql(cache, id1, a1.val + a2.val, 1);
removeSql(cache, id2);
}
} else
assert false : "Unknown write mode";
} else {
if (writeMode == WriteMode.PUT) {
cache.cache.put(id1, new MvccTestAccount(a1.val + 1, 1));
cache.cache.put(id2, new MvccTestAccount(a2.val - 1, 1));
} else if (writeMode == WriteMode.DML) {
updateSql(cache, id1, a1.val + 1, 1);
updateSql(cache, id2, a2.val - 1, 1);
} else
assert false : "Unknown write mode";
}
} else {
if (a1 == null) {
inserted = id1;
if (writeMode == WriteMode.PUT) {
cache.cache.put(id1, new MvccTestAccount(100, 1));
cache.cache.put(id2, new MvccTestAccount(a2.val - 100, 1));
} else if (writeMode == WriteMode.DML) {
insertSql(cache, id1, 100, 1);
updateSql(cache, id2, a2.val - 100, 1);
} else
assert false : "Unknown write mode";
} else {
inserted = id2;
if (writeMode == WriteMode.PUT) {
cache.cache.put(id1, new MvccTestAccount(a1.val - 100, 1));
cache.cache.put(id2, new MvccTestAccount(100, 1));
} else if (writeMode == WriteMode.DML) {
updateSql(cache, id1, a1.val - 100, 1);
insertSql(cache, id2, 100, 1);
} else
assert false : "Unknown write mode";
}
}
}
}
tx.commit();
// Tx succeeded: the re-inserted account is no longer considered removed.
if (inserted != null) {
assert withRmvs;
assertTrue(rmvdTracker.unmarkRemoved(inserted));
}
} catch (Throwable e) {
if (rmvd != null) {
assert withRmvs;
// If tx fails, unmark removed.
assertTrue(rmvdTracker.unmarkRemoved(rmvd));
}
throw e;
}
if (!withRmvs) {
Map<Integer, MvccTestAccount> accounts = null;
if (writeMode == WriteMode.PUT)
accounts = cache.cache.getAll(keys);
else if (writeMode == WriteMode.DML)
accounts = getAllSql(cache);
else
assert false : "Unknown write mode";
a1 = accounts.get(id1);
a2 = accounts.get(id2);
assertNotNull(a1);
assertNotNull(a2);
assertTrue(a1.updateCnt >= cntr1);
assertTrue(a2.updateCnt >= cntr2);
}
} catch (Exception e) {
handleTxException(e);
} finally {
cache.readUnlock();
}
}
info("Writer finished, updates: " + cnt);
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
Set<Integer> keys = new LinkedHashSet<>();
Map<Integer, Integer> lastUpdateCntrs = new HashMap<>();
SqlFieldsQuery sumQry = new SqlFieldsQuery("select sum(val) from MvccTestAccount");
while (!stop.get()) {
while (keys.size() < ACCOUNTS) keys.add(rnd.nextInt(ACCOUNTS));
TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
Map<Integer, MvccTestAccount> accounts = null;
try {
switch(readMode) {
case GET:
{
accounts = cache.cache.getAll(keys);
break;
}
case SCAN:
{
accounts = new HashMap<>();
Iterator<Cache.Entry<Integer, MvccTestAccount>> it = cache.cache.iterator();
try {
for (; it.hasNext(); ) {
IgniteCache.Entry<Integer, MvccTestAccount> e = it.next();
MvccTestAccount old = accounts.put(e.getKey(), e.getValue());
assertNull("new=" + e + ", old=" + old, old);
}
} finally {
U.closeQuiet((AutoCloseable) it);
}
break;
}
case SQL:
{
accounts = new HashMap<>();
if (rnd.nextBoolean()) {
SqlQuery<Integer, MvccTestAccount> qry = new SqlQuery<>(MvccTestAccount.class, "_key >= 0");
for (IgniteCache.Entry<Integer, MvccTestAccount> e : cache.cache.query(qry).getAll()) {
MvccTestAccount old = accounts.put(e.getKey(), e.getValue());
assertNull(old);
}
} else {
SqlFieldsQuery qry = new SqlFieldsQuery("select _key, val from MvccTestAccount");
for (List<?> row : cache.cache.query(qry).getAll()) {
Integer id = (Integer) row.get(0);
Integer val = (Integer) row.get(1);
MvccTestAccount old = accounts.put(id, new MvccTestAccount(val, 1));
assertNull(old);
}
}
break;
}
case SQL_SUM:
{
Long sum;
if (rnd.nextBoolean()) {
List<List<?>> res = cache.cache.query(sumQry).getAll();
assertEquals(1, res.size());
sum = (Long) res.get(0).get(0);
} else {
Map res = readAllByMode(cache.cache, keys, readMode, ACCOUNT_CODEC);
sum = (Long) ((Map.Entry) res.entrySet().iterator().next()).getValue();
}
assertEquals(ACCOUNT_START_VAL * ACCOUNTS, sum.intValue());
break;
}
default:
{
fail();
return;
}
}
} finally {
cache.readUnlock();
}
if (accounts != null) {
if (!withRmvs)
assertEquals(ACCOUNTS, accounts.size());
int sum = 0;
for (int i = 0; i < ACCOUNTS; i++) {
MvccTestAccount account = accounts.get(i);
if (account != null) {
sum += account.val;
Integer cntr = lastUpdateCntrs.get(i);
if (cntr != null)
assertTrue(cntr <= account.updateCnt);
// Remember the latest observed update counter for this account.
lastUpdateCntrs.put(i, account.updateCnt);
} else
assertTrue(withRmvs);
}
assertEquals(ACCOUNTS * ACCOUNT_START_VAL, sum);
}
}
if (idx == 0) {
TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
Map<Integer, MvccTestAccount> accounts;
ReadMode readMode0 = readMode == SQL_SUM ? SQL : readMode;
try {
accounts = readAllByMode(cache.cache, keys, readMode0, ACCOUNT_CODEC);
} finally {
cache.readUnlock();
}
int sum = 0;
for (int i = 0; i < ACCOUNTS; i++) {
MvccTestAccount account = accounts.get(i);
assertTrue(account != null || withRmvs);
info("Account [id=" + i + ", val=" + (account != null ? account.val : null) + ']');
if (account != null)
sum += account.val;
}
info("Sum: " + sum);
}
}
};
readWriteTest(restartMode, srvs, clients, cacheBackups, cacheParts, writers, readers, testTime, cfgC, init, writer, reader);
}
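Concrete MVCC test classes in the project invoke this template with specific topology and mode parameters. The call below is a hypothetical illustration only: the test name and all values are made up, and passing null for the restart mode is an assumption, not taken from any real subclass.
/** Hypothetical caller of accountsTxReadAll; all parameter values are illustrative. */
@Test
public void testAccountsPessimisticGetPut() throws Exception {
    accountsTxReadAll(
        4,      // server nodes
        2,      // client nodes
        1,      // cache backups
        64,     // cache partitions
        null,   // no extra cache configuration closure
        false,  // do not exercise removes
        ReadMode.GET,
        WriteMode.PUT,
        5_000,  // test time, ms
        null);  // restart mode: none (assumption; real tests may pass a RestartMode constant)
}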
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class CacheMvccAbstractTest, method readAllByMode.
/**
* Reads values from the cache for the given keys using the given read mode.
*
* @param cache Cache.
* @param keys Keys.
* @param readMode Read mode.
* @param codec Value codec.
* @return Map of read values by key.
*/
@SuppressWarnings("unchecked")
protected Map readAllByMode(IgniteCache cache, Set keys, ReadMode readMode, ObjectCodec codec) {
assert cache != null && keys != null && readMode != null;
assert readMode != SQL || codec != null;
boolean emulateLongQry = ThreadLocalRandom.current().nextBoolean();
switch(readMode) {
case GET:
return cache.getAll(keys);
case SCAN:
ScanQuery scanQry = new ScanQuery(new IgniteBiPredicate() {
@Override
public boolean apply(Object k, Object v) {
if (emulateLongQry)
doSleep(ThreadLocalRandom.current().nextInt(50));
return keys.contains(k);
}
});
Map res;
try (QueryCursor qry = cache.query(scanQry)) {
res = (Map) qry.getAll().stream().collect(Collectors.toMap(v -> ((IgniteBiTuple) v).getKey(), v -> ((IgniteBiTuple) v).getValue()));
assertTrue("res.size()=" + res.size() + ", keys.size()=" + keys.size(), res.size() <= keys.size());
}
return res;
case SQL:
StringBuilder b = new StringBuilder("SELECT " + codec.columnsNames() + " FROM " + codec.tableName() + " WHERE _key IN (");
boolean first = true;
for (Object key : keys) {
if (first)
first = false;
else
b.append(", ");
b.append(key);
}
b.append(')');
String qry = b.toString();
SqlFieldsQuery sqlFieldsQry = new SqlFieldsQuery(qry);
if (emulateLongQry)
sqlFieldsQry.setLazy(true).setPageSize(1);
List<List> rows;
try (FieldsQueryCursor<List> cur = cache.query(sqlFieldsQry)) {
if (emulateLongQry) {
rows = new ArrayList<>();
for (List row : cur) {
rows.add(row);
doSleep(ThreadLocalRandom.current().nextInt(50));
}
} else
rows = cur.getAll();
}
if (rows.isEmpty())
return Collections.emptyMap();
res = new HashMap();
for (List row : rows) res.put(row.get(0), codec.decode(row));
return res;
case SQL_SUM:
b = new StringBuilder("SELECT SUM(" + codec.aggregateColumnName() + ") FROM " + codec.tableName() + " WHERE _key IN (");
first = true;
for (Object key : keys) {
if (first)
first = false;
else
b.append(", ");
b.append(key);
}
b.append(')');
qry = b.toString();
FieldsQueryCursor<List> cur = cache.query(new SqlFieldsQuery(qry));
rows = cur.getAll();
if (rows.isEmpty())
return Collections.emptyMap();
res = new HashMap();
for (List row : rows) res.put(row.get(0), row.get(0));
return res;
default:
throw new AssertionError("Unsupported read mode: " + readMode);
}
}
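The SQL and SQL_SUM branches above build the IN list by concatenating keys into the query text, which is safe here because the keys are integers produced by the test itself. For reference, the same read can be written with bind parameters; the following is a minimal sketch under the same schema assumptions (the helper name readValsSql is made up, and it reads only the _key and val columns of MvccTestAccount).
/** Hypothetical helper: reads selected account values with a parameterized IN list. */
private static Map<Integer, Integer> readValsSql(IgniteCache<Integer, MvccTestAccount> cache, Set<Integer> keys) {
    // One '?' placeholder per key: (?, ?, ..., ?).
    String placeholders = keys.stream().map(k -> "?").collect(Collectors.joining(", "));

    SqlFieldsQuery qry = new SqlFieldsQuery(
        "select _key, val from MvccTestAccount where _key in (" + placeholders + ")")
        .setArgs(keys.toArray());

    Map<Integer, Integer> res = new HashMap<>();

    try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
        for (List<?> row : cur)
            res.put((Integer)row.get(0), (Integer)row.get(1));
    }

    return res;
}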
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class CacheMvccAbstractTest, method readByMode.
/**
* Reads a value from the cache for the given key using the given read mode.
*
* @param cache Cache.
* @param key Key.
* @param readMode Read mode.
* @param codec SQL object codec.
* @return Value.
*/
@SuppressWarnings("unchecked")
protected Object readByMode(IgniteCache cache, final Object key, ReadMode readMode, ObjectCodec codec) {
assert cache != null && key != null && readMode != null && readMode != SQL_SUM;
assert readMode != SQL || codec != null;
boolean emulateLongQry = ThreadLocalRandom.current().nextBoolean();
switch(readMode) {
case GET:
return cache.get(key);
case SCAN:
ScanQuery scanQry = new ScanQuery(new IgniteBiPredicate() {
@Override
public boolean apply(Object k, Object v) {
if (emulateLongQry)
doSleep(ThreadLocalRandom.current().nextInt(50));
return k.equals(key);
}
});
List res = cache.query(scanQry).getAll();
assertTrue(res.size() <= 1);
return res.isEmpty() ? null : ((IgniteBiTuple) res.get(0)).getValue();
case SQL:
String qry = "SELECT * FROM " + codec.tableName() + " WHERE _key=" + key;
SqlFieldsQuery sqlFieldsQry = new SqlFieldsQuery(qry);
if (emulateLongQry)
sqlFieldsQry.setLazy(true).setPageSize(1);
List<List> rows;
if (emulateLongQry) {
FieldsQueryCursor<List> cur = cache.query(sqlFieldsQry);
rows = new ArrayList<>();
for (List row : cur) {
rows.add(row);
doSleep(ThreadLocalRandom.current().nextInt(50));
}
} else
rows = cache.query(sqlFieldsQry).getAll();
assertTrue(rows.size() <= 1);
return rows.isEmpty() ? null : codec.decode(rows.get(0));
default:
throw new AssertionError("Unsupported read mode: " + readMode);
}
}
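readByMode is the single-key counterpart of readAllByMode. A hypothetical usage sketch follows (the helper name is made up; ACCOUNT_CODEC is the codec already used elsewhere in this test class, and the check assumes the key has not been removed by a concurrent writer).
/** Hypothetical check: reads one account via the SQL path and asserts it exists. */
private void assertAccountReadableViaSql(TestCache<Integer, MvccTestAccount> cache, Integer key) {
    MvccTestAccount acc = (MvccTestAccount)readByMode(cache.cache, key, ReadMode.SQL, ACCOUNT_CODEC);

    assertNotNull(acc);
}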
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class CacheMvccAbstractTest, method removeSql.
/**
* Removes an account via the SQL API.
*
* @param cache Cache.
* @param key Key.
*/
protected static void removeSql(TestCache<Integer, MvccTestAccount> cache, Integer key) {
SqlFieldsQuery qry = new SqlFieldsQuery("delete from MvccTestAccount where _key=" + key);
cache.cache.query(qry).getAll();
}
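The delete statement embeds the key directly in the SQL text, which is fine for the integer test keys. The same statement can also be parameterized; a minimal sketch of that variant follows (the method name is made up, everything else mirrors removeSql above).
/** Hypothetical parameterized variant of removeSql. */
protected static void removeSqlParameterized(TestCache<Integer, MvccTestAccount> cache, Integer key) {
    SqlFieldsQuery qry = new SqlFieldsQuery("delete from MvccTestAccount where _key = ?").setArgs(key);

    cache.cache.query(qry).getAll();
}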
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class IgnitePdsPageEvictionTest, method testPageEvictionSql.
/**
* @throws Exception If failed.
*/
@Test
public void testPageEvictionSql() throws Exception {
IgniteEx ig = grid(0);
ig.active(true);
try (IgniteDataStreamer<DbKey, DbValue> streamer = ig.dataStreamer(CACHE_NAME)) {
for (int i = 0; i < ENTRY_CNT; i++) {
streamer.addData(new DbKey(i), new DbValue(i, "value-" + i, Long.MAX_VALUE - i));
if (i > 0 && i % 10_000 == 0)
info("Done put: " + i);
}
}
IgniteCache<DbKey, DbValue> cache = ignite(0).cache(CACHE_NAME);
int i = 0;
for (Cache.Entry<DbKey, DbValue> entry : cache.query(new ScanQuery<DbKey, DbValue>())) {
assertEquals(Long.MAX_VALUE - entry.getKey().val, entry.getValue().lVal);
if (i > 0 && i % 10_000 == 0)
info("Done get: " + i);
i++;
}
for (i = 0; i < ENTRY_CNT; i++) {
List<List<?>> rows = cache.query(new SqlFieldsQuery("select lVal from DbValue where iVal=?").setArgs(i)).getAll();
assertEquals(1, rows.size());
assertEquals(Long.MAX_VALUE - i, rows.get(0).get(0));
if (i > 0 && i % 10_000 == 0)
info("Done SQL query: " + i);
}
}
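The SQL queries in this test assume the cache is configured so that DbValue fields are visible to SQL. A hedged sketch of such a configuration is shown below; the exact configuration used by the test class may differ, and it relies on the model classes carrying @QuerySqlField annotations.
CacheConfiguration<DbKey, DbValue> ccfg = new CacheConfiguration<>(CACHE_NAME);

// Expose DbKey/DbValue to SQL; queryable fields (e.g. iVal, lVal) are assumed
// to be annotated with @QuerySqlField in the test model classes.
ccfg.setIndexedTypes(DbKey.class, DbValue.class);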