Use of org.apache.ignite.IgniteCompute in project ignite by apache.
The class TaskNodeRestartTest, method testTaskNodeRestart.
/**
* @throws Exception If failed.
*/
@Test
public void testTaskNodeRestart() throws Exception {
    final AtomicBoolean finished = new AtomicBoolean();
    final AtomicInteger stopIdx = new AtomicInteger();

    IgniteInternalFuture<?> restartFut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
        @Override public Object call() throws Exception {
            int idx = stopIdx.getAndIncrement();
            int node = NODES + idx;

            while (!finished.get()) {
                log.info("Start node: " + node);
                startGrid(node);

                U.sleep(300);

                log.info("Stop node: " + node);
                stopGrid(node);
            }

            return null;
        }
    }, 2, "stop-thread");

    IgniteInternalFuture<?> fut = null;

    try {
        final long stopTime = System.currentTimeMillis() + SF.applyLB(30_000, 10_000);
        final AtomicInteger idx = new AtomicInteger();

        fut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
            @Override public Void call() throws Exception {
                int node = idx.getAndIncrement() % NODES;
                Ignite ignite = ignite(node);

                log.info("Start thread: " + ignite.name());

                IgniteCompute compute = ignite.compute();

                while (U.currentTimeMillis() < stopTime) {
                    try {
                        compute.broadcast(new TestCallable());
                        compute.call(new TestCallable());
                        compute.execute(new TestTask1(), null);
                        compute.execute(new TestTask2(), null);
                    }
                    catch (IgniteException e) {
                        log.info("Error: " + e);
                    }
                }

                return null;
            }
        }, 20, "test-thread");

        fut.get(90_000);

        finished.set(true);

        restartFut.get();
    }
    finally {
        finished.set(true);

        if (fut != null)
            fut.cancel();

        restartFut.get(5000);
    }
}
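The test above exercises the synchronous IgniteCompute entry points while server nodes restart underneath it: broadcast() runs a job on every node of the group, call() runs it on a single load-balanced node, and execute() submits a full ComputeTask. Below is a minimal standalone sketch of the first two calls, assuming a locally started node; the class name and the trivial callable are illustrative and not taken from the test.

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteCallable;

public class SyncComputeSketch {
    public static void main(String[] args) {
        // Start a single local node; in the test the compute facade is taken from a running grid.
        try (Ignite ignite = Ignition.start()) {
            IgniteCompute compute = ignite.compute();

            // Runs the callable on every node of the underlying cluster group.
            Collection<String> all = compute.broadcast((IgniteCallable<String>)() -> "pong");

            // Runs the callable on one node chosen by the load balancer.
            String one = compute.call((IgniteCallable<String>)() -> "pong");

            System.out.println("broadcast: " + all + ", call: " + one);
        }
    }
}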
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
The class GridCachePutAllFailoverSelfTest, method checkPutAllFailover.
/**
 * Tests the putAll() method along with failover and cache backups.
 *
 * Checks that the resulting primary cache size matches the expected number of entries.
*
* @param near Near enabled.
* @param workerCnt Worker count.
* @param shutdownCnt Shutdown count.
* @throws Exception If failed.
*/
public void checkPutAllFailover(boolean near, int workerCnt, int shutdownCnt) throws Exception {
    nearEnabled = near;
    backups = shutdownCnt;

    Collection<Integer> testKeys = generateTestKeys();

    final Ignite master = startClientGrid(MASTER);

    List<Ignite> workers = new ArrayList<>(workerCnt);

    for (int i = 1; i <= workerCnt; i++)
        workers.add(startGrid("worker" + i));

    info("Master: " + master.cluster().localNode().id());

    List<Ignite> runningWorkers = new ArrayList<>(workerCnt);

    for (int i = 1; i <= workerCnt; i++) {
        UUID id = workers.get(i - 1).cluster().localNode().id();

        info(String.format("Worker%d - %s", i, id));

        runningWorkers.add(workers.get(i - 1));
    }

    try {
        // Dummy call to fetch affinity function from remote node.
        master.affinity(CACHE_NAME).mapKeyToNode("Dummy");

        Random rnd = new Random();

        Collection<Integer> dataChunk = new ArrayList<>(DATA_CHUNK_SIZE);

        int entryCntr = 0;
        int chunkCntr = 0;

        final AtomicBoolean jobFailed = new AtomicBoolean(false);

        int failoverPushGap = 0;

        final CountDownLatch emptyLatch = new CountDownLatch(1);

        final AtomicBoolean inputExhausted = new AtomicBoolean();

        IgniteCompute comp = compute(master.cluster().forPredicate(workerNodesFilter));

        for (Integer key : testKeys) {
            dataChunk.add(key);

            entryCntr++;

            if (entryCntr == DATA_CHUNK_SIZE) {
                // Time to send data.
                chunkCntr++;

                assert dataChunk.size() == DATA_CHUNK_SIZE;

                log.info("Pushing data chunk [chunkNo=" + chunkCntr + "]");

                ComputeTaskFuture<Void> fut = comp.executeAsync(
                    new GridCachePutAllTask(
                        runningWorkers.get(rnd.nextInt(runningWorkers.size())).cluster().localNode().id(),
                        CACHE_NAME),
                    dataChunk);

                // Blocks if queue is full.
                resQueue.put(fut);

                fut.listen(new CI1<IgniteFuture<Void>>() {
                    @Override public void apply(IgniteFuture<Void> f) {
                        ComputeTaskFuture<?> taskFut = (ComputeTaskFuture<?>)f;

                        try {
                            // If something went wrong - we'll get exception here.
                            taskFut.get();
                        }
                        catch (IgniteException e) {
                            log.error("Job failed", e);

                            jobFailed.set(true);
                        }

                        // Remove complete future from queue to allow other jobs to proceed.
                        resQueue.remove(taskFut);

                        if (inputExhausted.get() && resQueue.isEmpty())
                            emptyLatch.countDown();
                    }
                });

                entryCntr = 0;
                dataChunk = new ArrayList<>(DATA_CHUNK_SIZE);

                if (chunkCntr >= FAIL_ON_CHUNK_NO) {
                    if (workerCnt - runningWorkers.size() < shutdownCnt) {
                        if (failoverPushGap > 0)
                            failoverPushGap--;
                        else {
                            Ignite victim = runningWorkers.remove(0);

                            info("Shutting down node: " + victim.cluster().localNode().id());

                            stopGrid(victim.name());

                            // Fail next node after some jobs have been pushed.
                            failoverPushGap = FAILOVER_PUSH_GAP;
                        }
                    }
                }
            }
        }

        inputExhausted.set(true);

        if (resQueue.isEmpty())
            emptyLatch.countDown();

        assert chunkCntr == TEST_MAP_SIZE / DATA_CHUNK_SIZE;

        // Wait for queue to empty.
        log.info("Waiting for empty queue...");

        boolean failedWait = false;

        if (!emptyLatch.await(AWAIT_TIMEOUT_SEC, TimeUnit.SECONDS)) {
            info(">>> Failed to wait for queue to empty.");

            failedWait = true;
        }

        if (!failedWait)
            assertFalse("One or more jobs have failed.", jobFailed.get());

        Collection<Integer> absentKeys = findAbsentKeys(runningWorkers.get(0), testKeys);

        if (!failedWait && !absentKeys.isEmpty()) {
            // Give some time to preloader.
            U.sleep(20000);

            absentKeys = findAbsentKeys(runningWorkers.get(0), testKeys);
        }

        info(">>> Absent keys: " + absentKeys);

        if (!F.isEmpty(absentKeys)) {
            for (Ignite g : runningWorkers) {
                IgniteKernal k = (IgniteKernal)g;

                info(">>>> Entries on node: " + k.localNodeId());

                GridCacheAdapter<Object, Object> cache = k.internalCache("partitioned");

                for (Integer key : absentKeys) {
                    GridCacheEntryEx entry = cache.peekEx(key);

                    if (entry != null)
                        info(" >>> " + entry);
                    if (cache.context().isNear()) {
                        GridCacheEntryEx entry0 = cache.context().near().dht().peekEx(key);

                        if (entry0 != null)
                            info(" >>> " + entry0);
                    }
                }

                info("");
            }
        }

        assertTrue(absentKeys.isEmpty());

        // Actual primary cache size.
        int primaryCacheSize = 0;

        for (Ignite g : runningWorkers) {
            info("Cache size [node=" + g.name() +
                ", localSize=" + g.cache(CACHE_NAME).localSize() +
                ", localPrimarySize=" + g.cache(CACHE_NAME).localSize(PRIMARY) + ']');

            primaryCacheSize += ((IgniteKernal)g).internalCache(CACHE_NAME).primarySize();
        }

        assertEquals(TEST_MAP_SIZE, primaryCacheSize);

        for (Ignite g : runningWorkers)
            assertEquals(TEST_MAP_SIZE, g.cache(CACHE_NAME).size(PRIMARY));
    }
    finally {
        stopAllGrids();
    }
}
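The failover scenario above leans on the asynchronous side of the API: comp.executeAsync() returns a ComputeTaskFuture, and a listener attached with listen() reacts when the task completes, with get() re-throwing any job failure. A minimal sketch of that pattern follows; SumTask and its trivial jobs are hypothetical and only stand in for GridCachePutAllTask.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.IgniteException;
import org.apache.ignite.Ignition;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskFuture;
import org.apache.ignite.compute.ComputeTaskSplitAdapter;

public class AsyncTaskSketch {
    /** Hypothetical task: one trivial job per node, results summed in reduce(). */
    private static class SumTask extends ComputeTaskSplitAdapter<Void, Integer> {
        @Override protected Collection<? extends ComputeJob> split(int gridSize, Void arg) {
            List<ComputeJob> jobs = new ArrayList<>();

            for (int i = 0; i < gridSize; i++) {
                jobs.add(new ComputeJobAdapter() {
                    @Override public Object execute() {
                        return 1;
                    }
                });
            }

            return jobs;
        }

        @Override public Integer reduce(List<ComputeJobResult> results) {
            int sum = 0;

            for (ComputeJobResult res : results)
                sum += res.<Integer>getData();

            return sum;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCompute compute = ignite.compute();

            // Submit without blocking; the future completes when all jobs have finished.
            ComputeTaskFuture<Integer> fut = compute.executeAsync(new SumTask(), null);

            // The listener runs on completion; get() re-throws a job failure, if any.
            fut.listen(f -> {
                try {
                    System.out.println("Task result: " + f.get());
                }
                catch (IgniteException e) {
                    System.err.println("Task failed: " + e);
                }
            });

            fut.get();
        }
    }
}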
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
The class GridCacheAbstractDataStructuresFailoverSelfTest, method checkAtomicSequenceInitialization.
/**
 * @param limitProjection {@code True} if the test should call init only on stable nodes.
* @throws Exception If failed.
*/
private void checkAtomicSequenceInitialization(boolean limitProjection) throws Exception {
    int threadCnt = 3;

    IgniteCompute compute;

    if (limitProjection) {
        List<UUID> nodeIds = new ArrayList<>(gridCount());

        for (int i = 0; i < gridCount(); i++)
            nodeIds.add(grid(i).cluster().localNode().id());

        compute = grid(0).compute(grid(0).cluster().forNodeIds(nodeIds));
    }
    else
        compute = grid(0).compute();

    final AtomicInteger idx = new AtomicInteger(gridCount());

    IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(new CA() {
        @Override public void apply() {
            int id = idx.getAndIncrement();

            try {
                log.info("Start node: " + id);

                startGrid(id);

                Thread.sleep(1000);
            }
            catch (Exception e) {
                throw F.wrap(e);
            }
            finally {
                stopGrid(id);

                info("Thread finished.");
            }
        }
    }, threadCnt, "test-thread");

    while (!fut.isDone()) {
        compute.call(new IgniteCallable<Object>() {
            /** */
            @IgniteInstanceResource
            private Ignite g;

            @Override public Object call() {
                try {
                    IgniteAtomicSequence seq = g.atomicSequence(STRUCTURE_NAME, 1, true);

                    assert seq != null;

                    for (int i = 0; i < 1000; i++)
                        seq.getAndIncrement();

                    return null;
                }
                catch (IgniteException e) {
                    // Fail if we are on stable nodes or exception is not node stop.
                    if (limitProjection || !X.hasCause(e, NodeStoppingException.class))
                        throw e;

                    return null;
                }
            }
        });
    }

    fut.get();
}
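Two details of the example above are worth isolating: the compute facade is narrowed to a fixed projection with cluster().forNodeIds(), and the callable receives its Ignite handle through @IgniteInstanceResource injection. A minimal sketch of both, assuming a single locally started node; the sequence name "demo-seq" is a placeholder.

import java.util.Collections;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.lang.IgniteCallable;
import org.apache.ignite.resources.IgniteInstanceResource;

public class ProjectionComputeSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Restrict execution to an explicit set of node IDs (here only the local node).
            ClusterGroup stable = ignite.cluster().forNodeIds(
                Collections.singleton(ignite.cluster().localNode().id()));

            IgniteCompute compute = ignite.compute(stable);

            Long next = compute.call(new IgniteCallable<Long>() {
                /** Injected on the node that actually runs the job. */
                @IgniteInstanceResource
                private Ignite g;

                @Override public Long call() {
                    // Create the sequence on first use, then take the next value.
                    return g.atomicSequence("demo-seq", 0, true).incrementAndGet();
                }
            });

            System.out.println("Next sequence value: " + next);
        }
    }
}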
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
The class IgniteRejectConnectOnNodeStopTest, method testNodeStop.
/**
* @throws Exception If failed.
*/
@Test
public void testNodeStop() throws Exception {
    Ignite srv = startGrid(0);

    final Ignite c = startClientGrid(1);

    ClusterGroup grp = srv.cluster().forClients();

    IgniteCompute srvCompute = srv.compute(grp);

    srvCompute.call(new DummyClosure());

    IgniteInternalFuture fut = GridTestUtils.runAsync(new Runnable() {
        @Override public void run() {
            IgniteCache cache = c.cache(DEFAULT_CACHE_NAME);

            for (int i = 0; i < 100_000; i++) {
                try {
                    cache.put(1, 1);
                }
                catch (Exception ignore) {
                    break;
                }
            }
        }
    }, "cache-put");

    U.sleep(100);

    final CountDownLatch stopStartLatch = new CountDownLatch(1);

    IgniteInternalFuture<?> fut2 = GridTestUtils.runAsync(new Runnable() {
        @Override public void run() {
            stopStartLatch.countDown();

            c.close();
        }
    });

    boolean err = false;

    try {
        stopStartLatch.await();

        IgniteCacheMessageRecoveryAbstractTest.closeSessions(srv);

        long stopTime = U.currentTimeMillis() + 10_000;

        while (U.currentTimeMillis() < stopTime) {
            try {
                srvCompute.call(new DummyClosure());
            }
            catch (ClusterTopologyException e) {
                err = true;

                assertFalse(fut2.isDone());

                break;
            }
        }
    }
    finally {
        stopLatch.countDown();
    }

    fut.get();
    fut2.get();
assertTrue("Failed to get excpected error", err);
}
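The stop test builds its compute facade over cluster().forClients(), so DummyClosure only ever lands on client nodes. A minimal sketch of that projection with one server and one embedded client in the same JVM; the instance names and the trivial callable are illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.lang.IgniteCallable;

public class ClientGroupComputeSketch {
    public static void main(String[] args) {
        try (Ignite srv = Ignition.start(new IgniteConfiguration().setIgniteInstanceName("server"));
             Ignite client = Ignition.start(new IgniteConfiguration()
                 .setIgniteInstanceName("client")
                 .setClientMode(true))) {

            // Target only client nodes, as the test does with srv.cluster().forClients().
            ClusterGroup clients = srv.cluster().forClients();
            IgniteCompute clientCompute = srv.compute(clients);

            String res = clientCompute.call((IgniteCallable<String>)() -> "ran on a client node");

            System.out.println(res);
        }
    }
}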
Use of org.apache.ignite.IgniteCompute in project ignite by apache.
The class CacheManualRebalancingTest, method testRebalance.
/**
* @throws Exception If failed.
*/
@Test
public void testRebalance() throws Exception {
    // Fill cache with large dataset to make rebalancing slow.
    try (IgniteDataStreamer<Object, Object> streamer = grid(0).dataStreamer(MYCACHE)) {
        for (int i = 0; i < 100_000; i++)
            streamer.addData(i, i);
    }

    // Start new node.
    final IgniteEx newNode = startGrid(NODES_CNT);

    int newNodeCacheSize;

    // Start manual rebalancing.
    IgniteCompute compute = newNode.compute();

    final IgniteFuture<?> rebalanceTaskFuture = compute.broadcastAsync(new MyCallable());

    boolean rebalanceFinished = GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return rebalanceTaskFuture.isDone();
        }
    }, 10_000);

    assertTrue(rebalanceFinished);

    assertTrue(newNode.context().cache().cache(MYCACHE).context().preloader().rebalanceFuture().isDone());

    newNodeCacheSize = newNode.cache(MYCACHE).localSize(CachePeekMode.ALL);

    System.out.println("New node cache local size: " + newNodeCacheSize);

    assertTrue(newNodeCacheSize > 0);
}
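The rebalancing test triggers MyCallable on every node with broadcastAsync() and then polls the returned IgniteFuture with waitForCondition(). A minimal sketch of the same broadcast, waiting with a bounded get() instead of polling; the runnable body is illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteRunnable;

public class BroadcastAsyncSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCompute compute = ignite.compute();

            // Fire the job on every node and keep a handle to the collective completion.
            IgniteFuture<Void> fut = compute.broadcastAsync(
                (IgniteRunnable)() -> System.out.println("triggered on " + Thread.currentThread().getName()));

            // Wait up to 10 seconds for all nodes to finish instead of polling isDone().
            fut.get(10_000);
        }
    }
}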