Use of org.apache.ignite.internal.util.GridRandom in project ignite by apache.
The class IgniteDbPutGetAbstractTest, method testRandomPut.
/**
 * Puts random keys and immediately reads each value back.
 */
public void testRandomPut() {
    IgniteEx ig = grid(0);
    IgniteCache<Integer, DbValue> cache = ig.cache(DEFAULT_CACHE_NAME);
    final int cnt = 1_000;
    long seed = System.nanoTime();
    X.println("Seed: " + seed);
    Random rnd = new GridRandom(seed);
    for (int i = 0; i < 500_000; i++) {
        int k = rnd.nextInt(cnt);
        DbValue v0 = new DbValue(k, "test-value " + k, i);
        if (i % 1000 == 0)
            X.println(" --> " + i);
        cache.put(k, v0);
        assertEquals(v0, cache.get(k));
    }
}
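DbValue is a value class defined elsewhere in IgniteDbPutGetAbstractTest and is not part of this snippet; the assertEquals(v0, cache.get(k)) check only works because it overrides equals(). A minimal hypothetical sketch of such a class (field names iVal/sVal/lVal are assumptions, not the actual source):

import java.util.Objects;

/** Hypothetical stand-in for the DbValue test class used in these snippets. */
class DbValue {
    // Assumed fields matching the constructor calls new DbValue(int, String, int).
    int iVal;
    String sVal;
    long lVal;

    DbValue(int iVal, String sVal, long lVal) {
        this.iVal = iVal;
        this.sVal = sVal;
        this.lVal = lVal;
    }

    /** Value equality is what makes assertEquals against cache.get() meaningful. */
    @Override public boolean equals(Object o) {
        if (this == o)
            return true;
        if (!(o instanceof DbValue))
            return false;
        DbValue other = (DbValue)o;
        return iVal == other.iVal && lVal == other.lVal && Objects.equals(sVal, other.sVal);
    }

    @Override public int hashCode() {
        return Objects.hash(iVal, sVal, lVal);
    }

    @Override public String toString() {
        return "DbValue [iVal=" + iVal + ", sVal=" + sVal + ", lVal=" + lVal + ']';
    }
}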
Use of org.apache.ignite.internal.util.GridRandom in project ignite by apache.
The class IgniteDbPutGetAbstractTest, method _testRandomPutGetRemove (the leading underscore keeps the test from being picked up by the runner).
public void _testRandomPutGetRemove() {
    IgniteEx ig = grid(0);
    final IgniteCache<Integer, DbValue> cache = ig.cache(DEFAULT_CACHE_NAME);
    int cnt = 100_000;
    Map<Integer, DbValue> map = new HashMap<>(cnt);
    // Fixed seed (instead of System.currentTimeMillis()) so a failing run can be replayed.
    long seed = 1460943282308L;
    X.println(" seed---> " + seed);
    Random rnd = new GridRandom(seed);
    for (int i = 0; i < 1_000_000; i++) {
        if (i % 5000 == 0)
            X.println(" --> " + i);
        int key = rnd.nextInt(cnt);
        DbValue v0 = new DbValue(key, "test-value-" + rnd.nextInt(200), rnd.nextInt(500));
        switch (rnd.nextInt(3)) {
            case 0:
                X.println("Put: " + key + " = " + v0);
                assertEquals(map.put(key, v0), cache.getAndPut(key, v0));
                // Falls through to the get case to read back the value just put.
            case 1:
                X.println("Get: " + key);
                assertEquals(map.get(key), cache.get(key));
                break;
            case 2:
                X.println("Rmv: " + key);
                assertEquals(map.remove(key), cache.getAndRemove(key));
                assertNull(cache.get(key));
        }
    }
    for (Integer key : map.keySet())
        assertEquals(map.get(key), cache.get(key));
}
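The fixed seed above is what makes this randomized test repeatable: two GridRandom instances built from the same seed produce the same sequence, so the exact put/get/remove order of a failing run can be replayed from the logged seed. A small standalone illustration (not part of the test class):

import java.util.Random;
import org.apache.ignite.internal.util.GridRandom;

public class SeedReplayExample {
    public static void main(String[] args) {
        long seed = 1460943282308L;

        // Two generators with the same seed yield identical sequences,
        // so logging the seed is enough to reproduce a random test run.
        Random r1 = new GridRandom(seed);
        Random r2 = new GridRandom(seed);

        for (int i = 0; i < 5; i++) {
            int a = r1.nextInt(100_000);
            int b = r2.nextInt(100_000);

            System.out.println(a + " == " + b + " : " + (a == b));
        }
    }
}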
Use of org.apache.ignite.internal.util.GridRandom in project ignite by apache.
The class IgniteDbPutGetAbstractTest, method testPutGetRandomUniqueMultipleObjects.
/**
 * @throws Exception If failed.
 */
public void testPutGetRandomUniqueMultipleObjects() throws Exception {
    IgniteEx ig = grid(0);
    final IgniteCache<Integer, DbValue> cache = ig.cache(DEFAULT_CACHE_NAME);
    GridCacheAdapter<Object, Object> internalCache = ig.context().cache().internalCache(DEFAULT_CACHE_NAME);
    int cnt = 100_000;
    Random rnd = new GridRandom();
    int[] keys = generateUniqueRandomKeys(cnt, rnd);
    X.println("Put start");
    for (int i : keys) {
        DbValue v0 = new DbValue(i, "test-value", i);
        // if (i % 100 == 0)
        //     X.println(" --> " + i);
        cache.put(i, v0);
        checkEmpty(internalCache, i);
        assertEquals(v0, cache.get(i));
        // for (int j : keys) {
        //     if (j == i)
        //         break;
        //
        //     assertEquals(i + ", " + j, new DbValue(j, "test-value", j), cache.get(j));
        // }
    }
    X.println("Get start");
    for (int i = 0; i < cnt; i++) {
        DbValue v0 = new DbValue(i, "test-value", i);
        checkEmpty(internalCache, i);
        // X.println(" <-- " + i);
        assertEquals(v0, cache.get(i));
    }
}
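generateUniqueRandomKeys(cnt, rnd) is a helper of the test class that is not shown in this snippet; it must return every key in [0, cnt) exactly once, in random order, so the second loop can read all keys back sequentially. A minimal sketch of one way to implement it (a Fisher-Yates shuffle of 0..cnt-1), not necessarily the actual implementation:

/** Hypothetical sketch: returns the keys 0..cnt-1 in random order. */
static int[] generateUniqueRandomKeys(int cnt, java.util.Random rnd) {
    int[] keys = new int[cnt];

    for (int i = 0; i < cnt; i++)
        keys[i] = i;

    // Fisher-Yates shuffle.
    for (int i = cnt - 1; i > 0; i--) {
        int j = rnd.nextInt(i + 1);
        int tmp = keys[i];
        keys[i] = keys[j];
        keys[j] = tmp;
    }

    return keys;
}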
Use of org.apache.ignite.internal.util.GridRandom in project ignite by apache.
The class IgniteDbPutGetAbstractTest, method testPutPrimaryUniqueSecondaryDuplicates.
/**
 * @throws Exception If failed.
 */
public void testPutPrimaryUniqueSecondaryDuplicates() throws Exception {
    IgniteEx ig = grid(0);
    final IgniteCache<Integer, DbValue> cache = ig.cache(DEFAULT_CACHE_NAME);
    GridCacheAdapter<Object, Object> internalCache = ig.context().cache().internalCache(DEFAULT_CACHE_NAME);
    int cnt = 100_000;
    Random rnd = new GridRandom();
    Map<Integer, DbValue> map = new HashMap<>();
    int[] keys = generateUniqueRandomKeys(cnt, rnd);
    X.println("Put start");
    for (int i : keys) {
        DbValue v0 = new DbValue(rnd.nextInt(30), "test-value", i);
        // X.println(" --> " + i);
        cache.put(i, v0);
        map.put(i, v0);
        checkEmpty(internalCache, i);
        assertEquals(v0, cache.get(i));
    }
    X.println("Get start");
    for (int i = 0; i < cnt; i++) {
        DbValue v0 = map.get(i);
        checkEmpty(internalCache, i);
        // X.println(" <-- " + i);
        assertEquals(v0, cache.get(i));
    }
}
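Note the asymmetry that gives this test its name: the cache keys come from generateUniqueRandomKeys, so the primary key is unique, while the first DbValue field is rnd.nextInt(30), so anything keyed or indexed on it sees at most 30 distinct values repeated across 100,000 entries. A quick standalone check of that distribution (illustration only, not part of the test):

import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import org.apache.ignite.internal.util.GridRandom;

public class SecondaryDuplicatesExample {
    public static void main(String[] args) {
        Random rnd = new GridRandom();

        // Same distribution as new DbValue(rnd.nextInt(30), "test-value", i):
        // 100_000 entries share at most 30 distinct "secondary" values.
        Map<Integer, Integer> histogram = new TreeMap<>();

        for (int i = 0; i < 100_000; i++)
            histogram.merge(rnd.nextInt(30), 1, Integer::sum);

        System.out.println("Distinct values: " + histogram.size());
        System.out.println("Histogram: " + histogram);
    }
}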
Use of org.apache.ignite.internal.util.GridRandom in project ignite by apache.
The class IgniteCacheQueryNodeRestartDistributedJoinSelfTest, method restarts.
/**
 * @param broadcastQry If {@code true}, tests the broadcast query.
 * @throws Exception If failed.
 */
private void restarts(final boolean broadcastQry) throws Exception {
    int duration = 90 * 1000;
    int qryThreadNum = 4;
    // 4 + 2 = 6 nodes
    int restartThreadsNum = 2;
    final int nodeLifeTime = 4000;
    final int logFreq = 100;
    final AtomicIntegerArray locks = new AtomicIntegerArray(totalNodes);
    SqlFieldsQuery qry0;
    if (broadcastQry)
        qry0 = new SqlFieldsQuery(QRY_0_BROADCAST).setDistributedJoins(true).setEnforceJoinOrder(true);
    else
        qry0 = new SqlFieldsQuery(QRY_0).setDistributedJoins(true);
    String plan = queryPlan(grid(0).cache("pu"), qry0);
    X.println("Plan1: " + plan);
    assertEquals(broadcastQry, plan.contains("batched:broadcast"));
    final List<List<?>> pRes = grid(0).cache("pu").query(qry0).getAll();
    Thread.sleep(3000);
    assertEquals(pRes, grid(0).cache("pu").query(qry0).getAll());
    final SqlFieldsQuery qry1;
    if (broadcastQry)
        qry1 = new SqlFieldsQuery(QRY_1_BROADCAST).setDistributedJoins(true).setEnforceJoinOrder(true);
    else
        qry1 = new SqlFieldsQuery(QRY_1).setDistributedJoins(true);
    plan = queryPlan(grid(0).cache("co"), qry1);
    X.println("Plan2: " + plan);
    assertEquals(broadcastQry, plan.contains("batched:broadcast"));
    final List<List<?>> rRes = grid(0).cache("co").query(qry1).getAll();
    assertFalse(pRes.isEmpty());
    assertFalse(rRes.isEmpty());
    final AtomicInteger qryCnt = new AtomicInteger();
    final AtomicBoolean qrysDone = new AtomicBoolean();
    final AtomicBoolean fail = new AtomicBoolean();
    IgniteInternalFuture<?> fut1 = multithreadedAsync(new CAX() {
        @Override public void applyx() throws IgniteCheckedException {
            GridRandom rnd = new GridRandom();
            try {
                while (!qrysDone.get()) {
                    int g;
                    do {
                        g = rnd.nextInt(locks.length());
                        if (fail.get())
                            return;
                    } while (!locks.compareAndSet(g, 0, 1));
                    if (rnd.nextBoolean()) {
                        IgniteCache<?, ?> cache = grid(g).cache("pu");
                        SqlFieldsQuery qry;
                        if (broadcastQry)
                            qry = new SqlFieldsQuery(QRY_0_BROADCAST).setDistributedJoins(true).setEnforceJoinOrder(true);
                        else
                            qry = new SqlFieldsQuery(QRY_0).setDistributedJoins(true);
                        boolean smallPageSize = rnd.nextBoolean();
                        qry.setPageSize(smallPageSize ? 30 : 1000);
                        try {
                            assertEquals(pRes, cache.query(qry).getAll());
                        }
                        catch (CacheException e) {
                            assertTrue("On large page size must retry.", smallPageSize);
                            boolean failedOnRemoteFetch = false;
                            for (Throwable th = e; th != null; th = th.getCause()) {
                                if (!(th instanceof CacheException))
                                    continue;
                                if (th.getMessage() != null && th.getMessage().startsWith("Failed to fetch data from node:")) {
                                    failedOnRemoteFetch = true;
                                    break;
                                }
                            }
                            if (!failedOnRemoteFetch) {
                                e.printStackTrace();
                                fail("Must fail inside of GridResultPage.fetchNextPage or subclass.");
                            }
                        }
                    }
                    else {
                        IgniteCache<?, ?> cache = grid(g).cache("co");
                        assertEquals(rRes, cache.query(qry1).getAll());
                    }
                    locks.set(g, 0);
                    int c = qryCnt.incrementAndGet();
                    if (c % logFreq == 0)
                        info("Executed queries: " + c);
                }
            }
            catch (Throwable e) {
                e.printStackTrace();
                error("Got exception: " + e.getMessage());
                fail.set(true);
            }
        }
    }, qryThreadNum, "query-thread");
    final AtomicInteger restartCnt = new AtomicInteger();
    final AtomicBoolean restartsDone = new AtomicBoolean();
    IgniteInternalFuture<?> fut2 = multithreadedAsync(new Callable<Object>() {
        @SuppressWarnings({"BusyWait"})
        @Override public Object call() throws Exception {
            try {
                GridRandom rnd = new GridRandom();
                while (!restartsDone.get()) {
                    int g;
                    do {
                        g = rnd.nextInt(locks.length());
                        if (fail.get())
                            return null;
                    } while (!locks.compareAndSet(g, 0, -1));
                    log.info("Stop node: " + g);
                    stopGrid(g);
                    Thread.sleep(rnd.nextInt(nodeLifeTime));
                    log.info("Start node: " + g);
                    startGrid(g);
                    Thread.sleep(rnd.nextInt(nodeLifeTime));
                    locks.set(g, 0);
                    int c = restartCnt.incrementAndGet();
                    if (c % logFreq == 0)
                        info("Node restarts: " + c);
                }
                return true;
            }
            catch (Throwable e) {
                e.printStackTrace();
                return true;
            }
        }
    }, restartThreadsNum, "restart-thread");
    Thread.sleep(duration);
    info("Stopping...");
    restartsDone.set(true);
    qrysDone.set(true);
    fut2.get();
    fut1.get();
    if (fail.get())
        fail("See message above");
    info("Stopped.");
}
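Both thread pools above coordinate through the same AtomicIntegerArray: element g is 0 while node g is free, flipped to 1 while a query thread is using it and to -1 while a restart thread is stopping and restarting it, and compareAndSet claims a node only if it is currently free. A stripped-down sketch of that claim/release pattern (simplified; the test releases with locks.set(g, 0) inline rather than in a finally block):

import java.util.Random;
import java.util.concurrent.atomic.AtomicIntegerArray;

public class NodeLockPatternExample {
    /** 0 = free, 1 = claimed by a query thread, -1 = claimed by a restart thread. */
    static final AtomicIntegerArray LOCKS = new AtomicIntegerArray(6);

    /** Claims a random free node, does some work against it, then releases it. */
    static void doWorkOnFreeNode(Random rnd, int claimedState) {
        int g;

        // Spin until some node is atomically flipped from "free" to the claimed state.
        do {
            g = rnd.nextInt(LOCKS.length());
        } while (!LOCKS.compareAndSet(g, 0, claimedState));

        try {
            System.out.println("Working on node " + g + " in state " + claimedState);
        }
        finally {
            // Release so query and restart threads never touch the same node at once.
            LOCKS.set(g, 0);
        }
    }

    public static void main(String[] args) {
        Random rnd = new Random();

        doWorkOnFreeNode(rnd, 1);   // as a query thread would
        doWorkOnFreeNode(rnd, -1);  // as a restart thread would
    }
}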