Use of org.apache.ignite.util.GridCommandHandlerIndexingUtils.Person in project ignite by apache.
The class GridCommandHandlerIndexForceRebuildTest, method testIndexRebuildUnderLoad.
/**
 * Checks how the index force rebuild command behaves when caches are under load.
 *
 * @throws Exception If failed.
 */
@Test
public void testIndexRebuildUnderLoad() throws Exception {
    IgniteEx n = grid(0);

    AtomicBoolean stopLoad = new AtomicBoolean(false);

    String cacheName1 = "tmpCache1";
    String cacheName2 = "tmpCache2";

    List<String> caches = F.asList(cacheName1, cacheName2);

    try {
        for (String c : caches)
            createAndFillCache(n, c, "tmpGrp");

        int cacheSize = n.cache(cacheName1).size();

        // Block index rebuild for both caches so their rebuild futures stay incomplete.
        for (String c : caches)
            blockRebuildIdx.put(c, new GridFutureAdapter<>());

        assertEquals(EXIT_CODE_OK, execute("--cache", "indexes_force_rebuild",
            "--node-id", n.localNode().id().toString(),
            "--cache-names", cacheName1 + "," + cacheName2));

        // Keep loading the first cache concurrently with the rebuild.
        IgniteInternalFuture<?> putCacheFut = runAsync(() -> {
            ThreadLocalRandom r = ThreadLocalRandom.current();

            while (!stopLoad.get())
                n.cache(cacheName1).put(r.nextInt(), new Person(r.nextInt(), valueOf(r.nextLong())));
        });

        assertTrue(waitForCondition(() -> n.cache(cacheName1).size() > cacheSize, getTestTimeout()));

        // Rebuild futures must exist and still be in progress while the rebuild is blocked.
        for (String c : caches) {
            IgniteInternalFuture<?> rebIdxFut = n.context().query().indexRebuildFuture(CU.cacheId(c));

            assertNotNull(rebIdxFut);
            assertFalse(rebIdxFut.isDone());

            blockRebuildIdx.get(c).get(getTestTimeout());
        }

        // Destroying the second cache must cancel its in-progress index rebuild.
        IgniteInternalFuture<Boolean> destroyCacheFut =
            n.context().cache().dynamicDestroyCache(cacheName2, false, true, false, null);

        SchemaIndexCacheFuture intlRebIdxFut = schemaIndexCacheFuture(n, CU.cacheId(cacheName2));
        assertNotNull(intlRebIdxFut);

        assertTrue(waitForCondition(() -> intlRebIdxFut.cancelToken().cancelException() != null, getTestTimeout()));

        // Stop the load, unblock the rebuild and wait for all futures to complete.
        stopLoad.set(true);
        blockRebuildIdx.clear();

        waitForIndexesRebuild(n);

        intlRebIdxFut.get(getTestTimeout());
        destroyCacheFut.get(getTestTimeout());
        putCacheFut.get(getTestTimeout());

        injectTestSystemOut();

        // Indexes of the surviving cache must be consistent after the rebuild under load.
        assertEquals(EXIT_CODE_OK, execute("--cache", "validate_indexes", "--check-crc", cacheName1));
        assertContains(log, testOut.toString(), "no issues found.");
    }
    finally {
        stopLoad.set(true);
        blockRebuildIdx.clear();

        n.destroyCache(cacheName1);
        n.destroyCache(cacheName2);
    }
}
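The helper createAndFillCache(n, c, "tmpGrp") comes from GridCommandHandlerIndexingUtils and is not shown on this page. Below is a minimal, hypothetical sketch of what such a helper could look like, assuming it creates a cache in the given group with a Person query entity and pre-fills it with random entries; the method name, entry count and field mapping are illustrative assumptions, not the actual utility.

/**
 * Hypothetical sketch of the createAndFillCache(n, c, "tmpGrp") helper used above.
 * The entry count below is illustrative only; the real utility defines its own size.
 */
private static void createAndFillCacheSketch(Ignite ignite, String cacheName, String grpName) {
    IgniteCache<Integer, Person> cache = ignite.getOrCreateCache(
        new CacheConfiguration<Integer, Person>(cacheName)
            .setGroupName(grpName)
            .setQueryEntities(Collections.singletonList(new QueryEntity(Integer.class, Person.class))));

    ThreadLocalRandom rand = ThreadLocalRandom.current();

    for (int i = 0; i < 10_000; i++)
        cache.put(i, new Person(rand.nextInt(), String.valueOf(rand.nextLong())));
}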
Use of org.apache.ignite.util.GridCommandHandlerIndexingUtils.Person in project ignite by apache.
The class GridCommandHandlerIndexingCheckSizeTest, method queryEntities.
/**
 * Creates {@link QueryEntity}'s with filling functions.
 *
 * @return {@link QueryEntity}'s with filling functions.
 */
private Map<QueryEntity, Function<Random, Object>> queryEntities() {
    Map<QueryEntity, Function<Random, Object>> qryEntities = new HashMap<>();

    qryEntities.put(personEntity(), rand -> new Person(rand.nextInt(), valueOf(rand.nextLong())));
    qryEntities.put(organizationEntity(), rand -> new Organization(rand.nextInt(), valueOf(rand.nextLong())));

    return qryEntities;
}
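personEntity() and organizationEntity() are private helpers of the same test class and are not shown here. A hedged sketch of how personEntity() could be assembled with the standard QueryEntity builder API follows; the field names orgId and name are assumptions inferred from the Person(int, String) constructor used throughout this page, not the actual helper.

/** Hypothetical sketch of personEntity(): key/value types plus queryable, indexed fields. */
private QueryEntity personEntitySketch() {
    return new QueryEntity()
        .setKeyType(Integer.class.getName())
        .setValueType(Person.class.getName())
        .addQueryField("orgId", Integer.class.getName(), null)
        .addQueryField("name", String.class.getName(), null)
        .setIndexes(Collections.singletonList(new QueryIndex("name")));
}

The map returned by queryEntities() pairs each entity with a function producing random values of the matching type, so a caller can create one cache (or SQL table) per entity and fill it generically.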
Use of org.apache.ignite.util.GridCommandHandlerIndexingUtils.Person in project ignite by apache.
The class GridCommandHandlerIndexingCheckSizeTest, method addColumnAndIdx.
/**
 * Adds the "address" column and an index on it for {@link Person} and
 * {@link Organization}, then adds new entries to each of them.
 *
 * @param node Node.
 * @param cacheName Cache name.
 * @param addCnt How many entries to add to each table.
 */
private void addColumnAndIdx(IgniteEx node, String cacheName, int addCnt) {
    IgniteCache<Object, Object> cache = node.cache(cacheName);

    // Extend both tables with an address column and index it.
    cache.query(new SqlFieldsQuery("alter table Person add column orgAddr varchar")).getAll();
    cache.query(new SqlFieldsQuery("alter table Organization add column addr varchar")).getAll();
    cache.query(new SqlFieldsQuery("create index p_o_addr on Person (orgAddr)")).getAll();
    cache.query(new SqlFieldsQuery("create index o_addr on Organization (addr)")).getAll();

    int key = node.cachex(cacheName).size();

    // Stream new entries that populate the freshly added columns.
    try (IgniteDataStreamer<Object, Object> streamer = node.dataStreamer(cacheName)) {
        ThreadLocalRandom rand = ThreadLocalRandom.current();

        for (int i = 0; i < addCnt; i++) {
            streamer.addData(key++, new Person(rand.nextInt(), valueOf(rand.nextLong())).orgAddr(valueOf(rand.nextLong())));
            streamer.addData(key++, new Organization(rand.nextInt(), valueOf(rand.nextLong())).addr(valueOf(rand.nextLong())));
        }

        streamer.flush();
    }
}
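The streamer loop relies on fluent orgAddr(...) and addr(...) setters on the Person and Organization classes from GridCommandHandlerIndexingUtils. The sketch below shows one plausible shape of such a Person class; the fields, annotations and visibility are assumptions based on the constructor and setter calls visible on this page, not the actual source.

/** Hypothetical sketch of the Person value class assumed by the code above. */
static class Person implements Serializable {
    /** Organization id, queryable and indexed (assumed). */
    @QuerySqlField(index = true)
    private int orgId;

    /** Name, queryable (assumed). */
    @QuerySqlField
    private String name;

    /** Backs the orgAddr column added at runtime via "alter table Person add column orgAddr varchar". */
    private String orgAddr;

    /** Constructor matching the new Person(int, String) calls above. */
    Person(int orgId, String name) {
        this.orgId = orgId;
        this.name = name;
    }

    /** Fluent setter used by the data streamer loop above. */
    Person orgAddr(String orgAddr) {
        this.orgAddr = orgAddr;

        return this;
    }
}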