Use of org.apache.ignite.IgniteCompute in project ignite by apache.
From the class GridClosureProcessorSelfTest, method callAsync.
/**
 * @param idx Node index.
 * @param job Callable job.
 * @param p Optional node predicate.
 * @return Future object.
 */
private IgniteFuture<Integer> callAsync(int idx, IgniteCallable<Integer> job, @Nullable IgnitePredicate<ClusterNode> p) {
    assert idx >= 0 && idx < NODES_CNT;
    assert job != null;
    execCntr.set(0);
    IgniteCompute comp = p != null ? compute(grid(idx).cluster().forPredicate(p)) : grid(idx).compute();
    return comp.callAsync(job);
}
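For context, a standalone use of the same callAsync pattern outside the test harness might look like the sketch below. It assumes a single already-started node; the predicate targeting server nodes and the job returning 42 are illustrative placeholders, and the test helpers grid(idx) and execCntr are not part of the public API. Note that callAsync is the Ignite 2.x replacement for the deprecated withAsync()/future() pair.

import java.util.UUID;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.lang.IgniteCallable;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgnitePredicate;

public class CallAsyncSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Restrict the compute projection to server nodes only (hypothetical filter).
            IgnitePredicate<ClusterNode> serverNodes = n -> !n.isClient();

            IgniteCompute comp = ignite.compute(ignite.cluster().forPredicate(serverNodes));

            // Submit a callable asynchronously; the job runs on one node of the projection.
            IgniteCallable<Integer> job = () -> 42;
            IgniteFuture<Integer> fut = comp.callAsync(job);

            // Block until the result is available.
            System.out.println("Result: " + fut.get());
        }
    }
}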
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
From the class GridCachePutAllFailoverSelfTest, method checkPutAllFailoverColocated.
/**
 * Tests the putAll() method along with failover and cache backups.
 *
 * Checks that the resulting primary cache size matches the expected size.
 *
 * @param near Near cache enabled flag.
 * @param workerCnt Worker count.
 * @param shutdownCnt Shutdown count.
 * @throws Exception If failed.
 */
public void checkPutAllFailoverColocated(boolean near, int workerCnt, int shutdownCnt) throws Exception {
    nearEnabled = near;
    backups = shutdownCnt;
    Collection<Integer> testKeys = generateTestKeys();
    final Ignite master = startGrid(MASTER);
    List<Ignite> workers = new ArrayList<>(workerCnt);
    for (int i = 1; i <= workerCnt; i++)
        workers.add(startGrid("worker" + i));
    info("Master: " + master.cluster().localNode().id());
    List<Ignite> runningWorkers = new ArrayList<>(workerCnt);
    for (int i = 1; i <= workerCnt; i++) {
        UUID id = workers.get(i - 1).cluster().localNode().id();
        info(String.format("Worker%d: %s", i, id));
        runningWorkers.add(workers.get(i - 1));
    }
    try {
        Map<UUID, Collection<Integer>> dataChunks = new HashMap<>();
        int chunkCntr = 0;
        final AtomicBoolean jobFailed = new AtomicBoolean(false);
        int failoverPushGap = 0;
        final CountDownLatch emptyLatch = new CountDownLatch(1);
        final AtomicBoolean inputExhausted = new AtomicBoolean();
        IgniteCompute comp = compute(master.cluster().forPredicate(workerNodesFilter));
        for (Integer key : testKeys) {
            ClusterNode mappedNode = master.affinity(CACHE_NAME).mapKeyToNode(key);
            UUID nodeId = mappedNode.id();
            Collection<Integer> data = dataChunks.get(nodeId);
            if (data == null) {
                data = new ArrayList<>(DATA_CHUNK_SIZE);
                dataChunks.put(nodeId, data);
            }
            data.add(key);
            if (data.size() == DATA_CHUNK_SIZE) {
                // Time to send the accumulated data chunk.
                chunkCntr++;
                log.info("Pushing data chunk [chunkNo=" + chunkCntr + "]");
                ComputeTaskFuture<Void> fut = comp.executeAsync(new GridCachePutAllTask(nodeId, CACHE_NAME), data);
                // Blocks if queue is full.
                resQueue.put(fut);
                fut.listen(new CI1<IgniteFuture<Void>>() {
                    @Override public void apply(IgniteFuture<Void> f) {
                        ComputeTaskFuture<?> taskFut = (ComputeTaskFuture<?>)f;
                        try {
                            // If something went wrong, the exception is rethrown here.
                            taskFut.get();
                        }
                        catch (IgniteException e) {
                            log.error("Job failed", e);
                            jobFailed.set(true);
                        }
                        // Remove the completed future from the queue to allow other jobs to proceed.
                        resQueue.remove(taskFut);
                        if (inputExhausted.get() && resQueue.isEmpty())
                            emptyLatch.countDown();
                    }
                });
                data = new ArrayList<>(DATA_CHUNK_SIZE);
                dataChunks.put(nodeId, data);
                if (chunkCntr >= FAIL_ON_CHUNK_NO) {
                    if (workerCnt - runningWorkers.size() < shutdownCnt) {
                        if (failoverPushGap > 0)
                            failoverPushGap--;
                        else {
                            Ignite victim = runningWorkers.remove(0);
                            info("Shutting down node: " + victim.cluster().localNode().id());
                            stopGrid(victim.name());
                            // Fail the next node after some jobs have been pushed.
                            failoverPushGap = FAILOVER_PUSH_GAP;
                        }
                    }
                }
            }
        }
        for (Map.Entry<UUID, Collection<Integer>> entry : dataChunks.entrySet()) {
            ComputeTaskFuture<Void> fut = comp.executeAsync(new GridCachePutAllTask(entry.getKey(), CACHE_NAME), entry.getValue());
            // Blocks if queue is full.
            resQueue.put(fut);
            fut.listen(new CI1<IgniteFuture<Void>>() {
                @Override public void apply(IgniteFuture<Void> f) {
                    ComputeTaskFuture<?> taskFut = (ComputeTaskFuture<?>)f;
                    try {
                        // If something went wrong, the exception is rethrown here.
                        taskFut.get();
                    }
                    catch (IgniteException e) {
                        log.error("Job failed", e);
                        jobFailed.set(true);
                    }
                    // Remove the completed future from the queue to allow other jobs to proceed.
                    resQueue.remove(taskFut);
                    if (inputExhausted.get() && resQueue.isEmpty())
                        emptyLatch.countDown();
                }
            });
        }
        inputExhausted.set(true);
        if (resQueue.isEmpty())
            emptyLatch.countDown();
        // Wait for the queue to empty.
        log.info("Waiting for empty queue...");
        boolean failedWait = false;
        if (!emptyLatch.await(AWAIT_TIMEOUT_SEC, TimeUnit.SECONDS)) {
            info(">>> Failed to wait for queue to empty.");
            failedWait = true;
        }
        if (!failedWait)
            assertFalse("One or more jobs have failed.", jobFailed.get());
        Collection<Integer> absentKeys = findAbsentKeys(runningWorkers.get(0), testKeys);
        if (!failedWait && !absentKeys.isEmpty()) {
            // Give the preloader some time to rebalance.
            U.sleep(15000);
            absentKeys = findAbsentKeys(runningWorkers.get(0), testKeys);
        }
        info(">>> Absent keys: " + absentKeys);
        assertTrue(absentKeys.isEmpty());
        // Actual primary cache size.
        int primaryCacheSize = 0;
        for (Ignite g : runningWorkers) {
            info("Cache size [node=" + g.name() + ", localSize=" + g.cache(CACHE_NAME).localSize() +
                ", localPrimarySize=" + g.cache(CACHE_NAME).localSize(PRIMARY) + ']');
            primaryCacheSize += g.cache(CACHE_NAME).localSize(PRIMARY);
        }
        assertEquals(TEST_MAP_SIZE, primaryCacheSize);
        for (Ignite g : runningWorkers)
            assertEquals(TEST_MAP_SIZE, g.cache(CACHE_NAME).size(PRIMARY));
    }
    finally {
        stopAllGrids();
    }
}
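The core of the test above is a backpressure pattern: the future for each in-flight chunk is parked in a bounded queue and removed by a listener, so only a fixed number of putAll tasks is outstanding at any time. The following is a minimal sketch of that pattern only, not the test itself: GridCachePutAllTask is test-internal, so the job body here is a plain runAsync closure, and the cache name, chunk size, and queue capacity are hypothetical values.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;

public class BoundedChunkPushSketch {
    private static final int MAX_IN_FLIGHT = 8;      // Hypothetical cap on outstanding chunks.
    private static final int DATA_CHUNK_SIZE = 100;  // Hypothetical chunk size.

    public static void main(String[] args) throws InterruptedException {
        try (Ignite ignite = Ignition.start()) {
            IgniteCompute comp = ignite.compute();

            // Bounded queue of in-flight futures; put() blocks once MAX_IN_FLIGHT chunks are pending.
            BlockingQueue<IgniteFuture<Void>> inFlight = new ArrayBlockingQueue<>(MAX_IN_FLIGHT);

            Map<Integer, Integer> chunk = new HashMap<>(DATA_CHUNK_SIZE);

            for (int key = 0; key < 10_000; key++) {
                chunk.put(key, key);

                if (chunk.size() == DATA_CHUNK_SIZE) {
                    final Map<Integer, Integer> batch = new HashMap<>(chunk);

                    // The job body stands in for the test's GridCachePutAllTask.
                    IgniteFuture<Void> fut = comp.runAsync(() ->
                        Ignition.localIgnite().getOrCreateCache("partitioned").putAll(batch));

                    inFlight.put(fut);                   // Blocks while the queue is full.
                    fut.listen(f -> inFlight.remove(f)); // Frees a slot when the chunk completes.

                    chunk.clear();
                }
            }
            // A real implementation would also flush the final partial chunk here, as the test does.
        }
    }
}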
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
From the class LargeEntryUpdateTest, method testEntryUpdate.
/**
 * @throws Exception If failed.
 */
public void testEntryUpdate() throws Exception {
    try (Ignite ignite = startGrid()) {
        for (int i = 0; i < CACHE_COUNT; ++i) {
            IgniteCache<Long, byte[]> cache = ignite.cache(CACHE_PREFIX + i);
            cache.put(0L, new byte[PAGE_SIZE * 2]);
        }
        IgniteCompute compute = ignite.compute().withAsync();
        long endTime = System.currentTimeMillis() + WAIT_TIMEOUT;
        int iter = 0;
        while (System.currentTimeMillis() < endTime) {
            log.info("Iteration: " + iter++);
            cacheUpdate.set(true);
            try {
                List<IgniteFuture> futs = new ArrayList<>();
                for (int i = 0; i < THREAD_COUNT; ++i) {
                    compute.run(new CacheUpdater());
                    futs.add(compute.future());
                }
                Thread.sleep(30_000);
                cacheUpdate.set(false);
                for (IgniteFuture fut : futs)
                    fut.get();
            }
            finally {
                cacheUpdate.set(false);
            }
        }
    }
}
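The withAsync()/future() pair used above is the pre-2.0 asynchronous API and is deprecated in Ignite 2.x. A hedged sketch of the same fan-out using runAsync(), where each call returns its own future, might look like the following; the job body and the cache name "test-cache" are placeholders for the test's internal CacheUpdater.

import java.util.ArrayList;
import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;

public class RunAsyncFanOutSketch {
    private static final int THREAD_COUNT = 4; // Hypothetical job count.

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCompute compute = ignite.compute();

            List<IgniteFuture<Void>> futs = new ArrayList<>();

            // Each runAsync() call returns its own future; no shared withAsync()/future() state is needed.
            for (int i = 0; i < THREAD_COUNT; ++i) {
                futs.add(compute.runAsync(() ->
                    Ignition.localIgnite().getOrCreateCache("test-cache").put(0L, new byte[1024])));
            }

            // Wait for all jobs to finish.
            for (IgniteFuture<Void> fut : futs)
                fut.get();
        }
    }
}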
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
From the class IgniteComputeEmptyClusterGroupTest, method testSync.
/**
 * @throws Exception If failed.
 */
public void testSync() throws Exception {
    ClusterGroup empty = ignite(0).cluster().forNodeId(UUID.randomUUID());
    assertEquals(0, empty.nodes().size());
    final IgniteCompute comp = ignite(0).compute(empty);
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            comp.affinityRun(DEFAULT_CACHE_NAME, 1, new FailRunnable());
            return null;
        }
    }, ClusterGroupEmptyException.class, null);
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            comp.apply(new FailClosure(), new Object());
            return null;
        }
    }, ClusterGroupEmptyException.class, null);
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            comp.affinityCall(DEFAULT_CACHE_NAME, 1, new FailCallable());
            return null;
        }
    }, ClusterGroupEmptyException.class, null);
    GridTestUtils.assertThrows(log, new Callable<Void>() {
        @Override public Void call() throws Exception {
            comp.broadcast(new FailCallable());
            return null;
        }
    }, ClusterGroupEmptyException.class, null);
}
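The synchronous failure mode exercised above can be reproduced without the GridTestUtils harness: a blocking compute call routed to an empty cluster group fails with ClusterGroupEmptyException, as the assertions here show. A minimal, hedged sketch on a single node:

import java.util.UUID;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.cluster.ClusterGroupEmptyException;
import org.apache.ignite.lang.IgniteCallable;

public class EmptyGroupSyncSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // A random node ID matches no node, so the projection is guaranteed to be empty.
            ClusterGroup empty = ignite.cluster().forNodeId(UUID.randomUUID());

            IgniteCompute comp = ignite.compute(empty);

            IgniteCallable<String> job = () -> "should never run";

            try {
                comp.broadcast(job);
            }
            catch (ClusterGroupEmptyException e) {
                System.out.println("Synchronous call failed as expected: " + e.getMessage());
            }
        }
    }
}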
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
From the class IgniteComputeEmptyClusterGroupTest, method testAsync.
/**
 * @throws Exception If failed.
 */
public void testAsync() throws Exception {
    ClusterGroup empty = ignite(0).cluster().forNodeId(UUID.randomUUID());
    assertEquals(0, empty.nodes().size());
    IgniteCompute comp = ignite(0).compute(empty);
    checkFutureFails(comp.affinityRunAsync(DEFAULT_CACHE_NAME, 1, new FailRunnable()));
    checkFutureFails(comp.applyAsync(new FailClosure(), new Object()));
    checkFutureFails(comp.affinityCallAsync(DEFAULT_CACHE_NAME, 1, new FailCallable()));
    checkFutureFails(comp.broadcastAsync(new FailCallable()));
}
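checkFutureFails is internal to the test, but its intent follows from the contrast with testSync: as exercised here, the async variants return a future rather than throwing at the call site, and the ClusterGroupEmptyException surfaces when get() is invoked. A hedged sketch of an equivalent check, with a helper name of our own (assertFutureFails is hypothetical, not the test's method):

import org.apache.ignite.cluster.ClusterGroupEmptyException;
import org.apache.ignite.lang.IgniteFuture;

public class EmptyGroupAsyncCheck {
    /** Asserts that the given future completes with ClusterGroupEmptyException (hypothetical helper). */
    static void assertFutureFails(IgniteFuture<?> fut) {
        try {
            fut.get();

            throw new AssertionError("Future was expected to fail but completed normally.");
        }
        catch (ClusterGroupEmptyException e) {
            System.out.println("Async call failed as expected: " + e.getMessage());
        }
    }
}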