Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class CacheMvccSqlQueriesAbstractTest, method updateSingleValue.
/**
* @param singleNode {@code True} to run the test with a single node.
* @param locQry Local query flag.
* @throws Exception If failed.
*/
private void updateSingleValue(boolean singleNode, final boolean locQry) throws Exception {
final int VALS = 100;
final int writers = 4;
final int readers = 4;
final int INC_BY = 110;
final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
@Override
public void apply(IgniteCache<Object, Object> cache) {
Map<Integer, MvccTestSqlIndexValue> vals = new HashMap<>();
for (int i = 0; i < VALS; i++) vals.put(i, new MvccTestSqlIndexValue(i));
cache.putAll(vals);
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
int cnt = 0;
while (!stop.get()) {
TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
try {
Integer key = rnd.nextInt(VALS);
while (true) {
try {
cache.cache.invoke(key, new CacheEntryProcessor<Integer, MvccTestSqlIndexValue, Object>() {
@Override
public Object process(MutableEntry<Integer, MvccTestSqlIndexValue> e, Object... args) {
Integer key = e.getKey();
MvccTestSqlIndexValue val = e.getValue();
int newIdxVal;
if (val.idxVal1 < INC_BY) {
assertEquals(key.intValue(), val.idxVal1);
newIdxVal = val.idxVal1 + INC_BY;
} else {
assertEquals(INC_BY + key, val.idxVal1);
newIdxVal = key;
}
e.setValue(new MvccTestSqlIndexValue(newIdxVal));
return null;
}
});
break;
} catch (CacheException e) {
MvccFeatureChecker.assertMvccWriteConflict(e);
}
}
} finally {
cache.readUnlock();
}
cnt++;
}
info("Writer finished, updates: " + cnt);
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
List<SqlFieldsQuery> fieldsQrys = new ArrayList<>();
fieldsQrys.add(new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where idxVal1=?").setLocal(locQry));
fieldsQrys.add(new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where idxVal1=? or idxVal1=?").setLocal(locQry));
fieldsQrys.add(new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where _key=?").setLocal(locQry));
List<SqlQuery<Integer, MvccTestSqlIndexValue>> sqlQrys = new ArrayList<>();
sqlQrys.add(new SqlQuery<Integer, MvccTestSqlIndexValue>(MvccTestSqlIndexValue.class, "idxVal1=?").setLocal(locQry));
sqlQrys.add(new SqlQuery<Integer, MvccTestSqlIndexValue>(MvccTestSqlIndexValue.class, "idxVal1=? or idxVal1=?").setLocal(locQry));
sqlQrys.add(new SqlQuery<Integer, MvccTestSqlIndexValue>(MvccTestSqlIndexValue.class, "_key=?").setLocal(locQry));
while (!stop.get()) {
Integer key = rnd.nextInt(VALS);
int qryIdx = rnd.nextInt(3);
TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
List<List<?>> res;
try {
if (rnd.nextBoolean()) {
SqlFieldsQuery qry = fieldsQrys.get(qryIdx);
if (qryIdx == 1)
qry.setArgs(key, key + INC_BY);
else
qry.setArgs(key);
res = cache.cache.query(qry).getAll();
} else {
SqlQuery<Integer, MvccTestSqlIndexValue> qry = sqlQrys.get(qryIdx);
if (qryIdx == 1)
qry.setArgs(key, key + INC_BY);
else
qry.setArgs(key);
res = new ArrayList<>();
for (IgniteCache.Entry<Integer, MvccTestSqlIndexValue> e : cache.cache.query(qry).getAll()) {
List<Object> row = new ArrayList<>(2);
row.add(e.getKey());
row.add(e.getValue().idxVal1);
res.add(row);
}
}
} finally {
cache.readUnlock();
}
assertTrue(qryIdx == 0 || !res.isEmpty());
if (!res.isEmpty()) {
assertEquals(1, res.size());
List<?> resVals = res.get(0);
Integer key0 = (Integer) resVals.get(0);
Integer val0 = (Integer) resVals.get(1);
assertEquals(key, key0);
assertTrue(val0.equals(key) || val0.equals(key + INC_BY));
}
}
if (idx == 0) {
SqlFieldsQuery qry = new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue");
TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
List<List<?>> res;
try {
res = cache.cache.query(qry).getAll();
} finally {
cache.readUnlock();
}
assertEquals(VALS, res.size());
for (List<?> vals : res) info("Value: " + vals);
}
}
};
int srvs;
int clients;
if (singleNode) {
srvs = 1;
clients = 0;
} else {
srvs = 4;
clients = 2;
}
readWriteTest(null, srvs, clients, 0, DFLT_PARTITION_COUNT, writers, readers, DFLT_TEST_TIME, new InitIndexing(Integer.class, MvccTestSqlIndexValue.class), init, writer, reader);
for (Ignite node : G.allGrids()) checkActiveQueriesCleanup(node);
}
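Because IgniteInClosure<E> declares a single abstract method apply(E), the anonymous-class init closure above can also be written as a lambda. The sketch below is not part of the test: it simplifies the value type to Integer (the real test stores instances of the inner class MvccTestSqlIndexValue) and only illustrates the equivalent lambda form.

import java.util.HashMap;
import java.util.Map;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.lang.IgniteInClosure;

class InitClosureSketch {
    /** Builds an init closure that bulk-loads {@code vals} sequential keys, mirroring the anonymous class above. */
    static IgniteInClosure<IgniteCache<Object, Object>> initClosure(int vals) {
        return cache -> {
            Map<Integer, Integer> initial = new HashMap<>();

            for (int i = 0; i < vals; i++)
                initial.put(i, i); // the real test puts new MvccTestSqlIndexValue(i) here

            // Single bulk put, exactly as in the anonymous-class version.
            cache.putAll(initial);
        };
    }
}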
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class RunningQueriesTest, method getConfiguration.
/**
* {@inheritDoc}
*/
@Override
protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
IgniteConfiguration cfg = super.getConfiguration(gridName);
cfg.setDiscoverySpi(new TcpDiscoverySpi() {
@Override
public void sendCustomEvent(DiscoverySpiCustomMessage msg) throws IgniteException {
if (CustomMessageWrapper.class.isAssignableFrom(msg.getClass())) {
DiscoveryCustomMessage delegate = ((CustomMessageWrapper) msg).delegate();
if (DynamicCacheChangeBatch.class.isAssignableFrom(delegate.getClass())) {
((DynamicCacheChangeBatch) delegate).requests().stream().filter((c) -> !c.cacheName().equalsIgnoreCase("default")).findAny().ifPresent((c) -> {
try {
awaitTimeout();
} catch (Exception e) {
e.printStackTrace();
}
});
} else if (SchemaProposeDiscoveryMessage.class.isAssignableFrom(delegate.getClass())) {
try {
awaitTimeout();
} catch (Exception e) {
e.printStackTrace();
}
}
}
super.sendCustomEvent(msg);
}
});
cfg.setCommunicationSpi(new TcpCommunicationSpi() {
/**
* {@inheritDoc}
*/
@Override
public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
if (GridIoMessage.class.isAssignableFrom(msg.getClass())) {
Message gridMsg = ((GridIoMessage) msg).message();
if (GridNearAtomicSingleUpdateFilterRequest.class.isAssignableFrom(gridMsg.getClass()) || GridNearAtomicFullUpdateRequest.class.isAssignableFrom(gridMsg.getClass())) {
try {
awaitTimeout();
} catch (Exception ignore) {
}
}
}
super.sendMessage(node, msg, ackC);
}
});
return cfg;
}
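The communication SPI override above relies on the test's awaitTimeout() helper, which is not shown in this excerpt. As a rough, generic sketch of the same pattern, a TcpCommunicationSpi subclass can hold selected messages on a latch before delegating to the real send; the latch, the 10-second timeout, and the message filter below are illustrative assumptions, not the test's actual logic.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.ignite.IgniteException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.managers.communication.GridIoMessage;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.plugin.extensions.communication.Message;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;

class DelayingCommunicationSpi extends TcpCommunicationSpi {
    /** Released by the test when delayed messages may proceed. */
    private final CountDownLatch unblock = new CountDownLatch(1);

    /** {@inheritDoc} */
    @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
        if (msg instanceof GridIoMessage) {
            try {
                // Illustrative delay: hold wrapped messages until released, at most 10 seconds.
                unblock.await(10, TimeUnit.SECONDS);
            }
            catch (InterruptedException ignore) {
                Thread.currentThread().interrupt();
            }
        }

        // The ack closure is passed through untouched; the SPI invokes it on acknowledgement.
        super.sendMessage(node, msg, ackC);
    }

    /** Lets all delayed messages through. */
    void release() {
        unblock.countDown();
    }
}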
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class CacheMvccAbstractTest, method updateNObjectsTest.
/**
* @param N Number of objects to update in a single transaction.
* @param srvs Number of server nodes.
* @param clients Number of client nodes.
* @param cacheBackups Number of cache backups.
* @param cacheParts Number of cache partitions.
* @param time Test time.
* @param cfgC Optional closure applied to the cache configuration before caches are created.
* @param readMode Read mode.
* @param writeMode Write mode.
* @param restartMode Restart mode.
* @throws Exception If failed.
*/
@SuppressWarnings("unchecked")
protected void updateNObjectsTest(final int N, final int srvs, final int clients, int cacheBackups, int cacheParts, long time, @Nullable IgniteInClosure<CacheConfiguration> cfgC, ReadMode readMode, WriteMode writeMode, RestartMode restartMode) throws Exception {
final int TOTAL = 20;
assert N <= TOTAL;
info("updateNObjectsTest [n=" + N + ", total=" + TOTAL + ']');
final int writers = 4;
final int readers = 4;
final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
@Override
public void apply(IgniteCache<Object, Object> cache) {
final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
Map<Integer, Integer> vals = new LinkedHashMap<>();
for (int i = 0; i < TOTAL; i++) vals.put(i, N);
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
tx.commit();
}
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
int cnt = 0;
while (!stop.get()) {
TestCache<Integer, Integer> cache = randomCache(caches, rnd);
IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
TreeSet<Integer> keys = new TreeSet<>();
while (keys.size() < N) keys.add(rnd.nextInt(TOTAL));
try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
tx.timeout(TX_TIMEOUT);
Map<Integer, Integer> curVals = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
assertEquals(N, curVals.size());
Map<Integer, Integer> newVals = new TreeMap<>();
for (Map.Entry<Integer, Integer> e : curVals.entrySet()) newVals.put(e.getKey(), e.getValue() + 1);
writeAllByMode(cache.cache, newVals, writeMode, INTEGER_CODEC);
tx.commit();
} catch (Exception e) {
handleTxException(e);
} finally {
cache.readUnlock();
}
cnt++;
}
info("Writer finished, updates: " + cnt);
}
};
GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
@Override
public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
Set<Integer> keys = new LinkedHashSet<>();
while (!stop.get()) {
while (keys.size() < TOTAL) keys.add(rnd.nextInt(TOTAL));
TestCache<Integer, Integer> cache = randomCache(caches, rnd);
Map<Integer, Integer> vals = null;
try {
vals = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
} catch (Exception e) {
handleTxException(e);
} finally {
cache.readUnlock();
}
assertEquals("vals=" + vals, TOTAL, vals.size());
int sum = 0;
for (int i = 0; i < TOTAL; i++) {
Integer val = vals.get(i);
assertNotNull(val);
sum += val;
}
assertEquals(0, sum % N);
}
if (idx == 0) {
TestCache<Integer, Integer> cache = randomCache(caches, rnd);
Map<Integer, Integer> vals;
try {
vals = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
} finally {
cache.readUnlock();
}
int sum = 0;
for (int i = 0; i < TOTAL; i++) {
Integer val = vals.get(i);
info("Value [id=" + i + ", val=" + val + ']');
sum += val;
}
info("Sum [sum=" + sum + ", mod=" + sum % N + ']');
}
}
};
readWriteTest(restartMode, srvs, clients, cacheBackups, cacheParts, writers, readers, time, cfgC, init, writer, reader);
}
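The cfgC parameter is itself an IgniteInClosure: readWriteTest() applies it to the CacheConfiguration before the test caches are created, which is how these tests customize the cache under test (the first example above passes new InitIndexing(Integer.class, MvccTestSqlIndexValue.class) for the same parameter). A small illustrative sketch of such a closure follows; the specific settings are examples only, not what any concrete subclass passes.

import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.lang.IgniteInClosure;

class CacheCfgClosureSketch {
    /** Example cfgC closure: tweaks the cache configuration before the test caches are started. */
    static IgniteInClosure<CacheConfiguration> cfgClosure() {
        return ccfg -> {
            // Illustrative settings; a real test would set whatever it needs to exercise.
            ccfg.setOnheapCacheEnabled(true);
            ccfg.setStatisticsEnabled(true);
        };
    }
}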
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class IgniteWalReaderTest, method testPutAllTxIntoTwoNodes.
/**
* Tests transaction generation and WAL for putAll cache operation.
*
* @throws Exception if failed.
*/
@Test
public void testPutAllTxIntoTwoNodes() throws Exception {
Ignite ignite = startGrid("node0");
Ignite ignite1 = startGrid(1);
ignite.cluster().active(true);
Map<Object, IndexedObject> map = new TreeMap<>();
int cntEntries = 1000;
for (int i = 0; i < cntEntries; i++) map.put(i, new IndexedObject(i));
ignite.cache(CACHE_NAME).putAll(map);
ignite.cluster().active(false);
String subfolderName1 = genDbSubfolderName(ignite, 0);
String subfolderName2 = genDbSubfolderName(ignite1, 1);
stopAllGrids();
String workDir = U.defaultWorkDirectory();
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
StringBuilder sb = new StringBuilder();
Map<GridCacheOperation, Integer> operationsFound = new EnumMap<>(GridCacheOperation.class);
IgniteInClosure<DataRecord> drHnd = dataRecord -> {
sb.append("{");
for (int i = 0; i < dataRecord.entryCount(); i++) {
DataEntry entry = dataRecord.get(i);
GridCacheOperation op = entry.op();
Integer cnt = operationsFound.get(op);
operationsFound.put(op, cnt == null ? 1 : (cnt + 1));
if (entry instanceof UnwrapDataEntry) {
final UnwrapDataEntry entry1 = (UnwrapDataEntry) entry;
sb.append(entry1.op()).append(" for ").append(entry1.unwrappedKey());
final GridCacheVersion ver = entry.nearXidVersion();
sb.append(", ");
if (ver != null)
sb.append("tx=").append(ver).append(", ");
}
}
sb.append("}\n");
};
scanIterateAndCount(factory, createIteratorParametersBuilder(workDir, subfolderName1).filesOrDirs(workDir + "/db/wal/" + subfolderName1, workDir + "/db/wal/archive/" + subfolderName1), 1, 1, null, drHnd);
scanIterateAndCount(factory, createIteratorParametersBuilder(workDir, subfolderName2).filesOrDirs(workDir + "/db/wal/" + subfolderName2, workDir + "/db/wal/archive/" + subfolderName2), 1, 1, null, drHnd);
Integer createsFound = operationsFound.get(CREATE);
if (log.isInfoEnabled())
log.info(sb.toString());
assertTrue("Create operations should be found in log: " + operationsFound, createsFound != null && createsFound > 0);
assertTrue("Create operations count should be at least " + cntEntries + " in log: " + operationsFound, createsFound >= cntEntries);
}
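scanIterateAndCount() is a helper of IgniteWalReaderTest and is not shown in this excerpt. As a minimal sketch of what such a scan does with the drHnd closure, the method below iterates WAL records from the given parameters and hands each DataRecord to the IgniteInClosure; package names are per Ignite 2.x and may differ between versions.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.pagemem.wal.WALIterator;
import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory;
import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory.IteratorParametersBuilder;
import org.apache.ignite.lang.IgniteInClosure;

class WalScanSketch {
    /** Iterates the WAL described by {@code params} and applies {@code drHnd} to every data record. */
    static void scanDataRecords(IgniteWalIteratorFactory factory, IteratorParametersBuilder params,
        IgniteInClosure<DataRecord> drHnd) throws IgniteCheckedException {
        try (WALIterator it = factory.iterator(params)) {
            while (it.hasNext()) {
                WALRecord rec = it.next().get2();

                if (rec instanceof DataRecord)
                    drHnd.apply((DataRecord)rec); // same role as drHnd in the tests above
            }
        }
    }
}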
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class IgniteWalReaderTest, method testPrimaryFlagOnTwoNodes.
/**
* Tests WAL data records and the primary flag for a putAll operation spanning two nodes.
*
* @throws Exception if failed.
*/
@Test
public void testPrimaryFlagOnTwoNodes() throws Exception {
backupCnt = 1;
IgniteEx ignite = startGrid("node0");
Ignite ignite1 = startGrid(1);
ignite.cluster().state(ACTIVE);
IgniteCache<Integer, IndexedObject> cache = ignite.cache(CACHE_NAME);
backupCnt = 0;
int cntEntries = 100;
List<Integer> keys = findKeys(ignite.localNode(), cache, cntEntries, 0, 0);
Map<Integer, IndexedObject> map = new TreeMap<>();
for (Integer key : keys) map.putIfAbsent(key, new IndexedObject(key));
cache.putAll(map);
ignite.cluster().active(false);
String subfolderName1 = genDbSubfolderName(ignite, 0);
String subfolderName2 = genDbSubfolderName(ignite1, 1);
stopAllGrids();
String workDir = U.defaultWorkDirectory();
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
Map<GridCacheOperation, Integer> operationsFound = new EnumMap<>(GridCacheOperation.class);
IgniteInClosure<DataRecord> drHnd = dataRecord -> {
for (int i = 0; i < dataRecord.entryCount(); i++) {
DataEntry entry = dataRecord.get(i);
GridCacheOperation op = entry.op();
Integer cnt = operationsFound.get(op);
operationsFound.put(op, cnt == null ? 1 : (cnt + 1));
}
};
scanIterateAndCount(factory, createIteratorParametersBuilder(workDir, subfolderName1).filesOrDirs(workDir + "/db/wal/" + subfolderName1, workDir + "/db/wal/archive/" + subfolderName1), 1, 1, null, drHnd);
primary = false;
scanIterateAndCount(factory, createIteratorParametersBuilder(workDir, subfolderName2).filesOrDirs(workDir + "/db/wal/" + subfolderName2, workDir + "/db/wal/archive/" + subfolderName2), 1, 1, null, drHnd);
}