Use of org.apache.ignite.internal.processors.hadoop.Hadoop in project ignite by apache: class HadoopJobTrackerSelfTest, method checkStatus.
/**
 * Checks job execution status on every node in the grid.
 *
 * @param jobId Job ID.
 * @param complete Expected completion status: {@code true} means the finish future
 *      must complete, {@code false} means it must still be pending on every node.
 * @throws Exception If failed.
 */
private void checkStatus(HadoopJobId jobId, boolean complete) throws Exception {
    for (int i = 0; i < gridCount(); i++) {
        IgniteKernal kernal = (IgniteKernal)grid(i);

        Hadoop hadoop = kernal.hadoop();

        HadoopJobStatus stat = hadoop.status(jobId);

        // Framework assertion instead of a bare 'assert', which is silently
        // skipped when the JVM runs without the '-ea' flag.
        assertNotNull("Status must be available for job: " + jobId, stat);

        IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);

        if (!complete)
            assertFalse(fut.isDone());
        else {
            info("Waiting for status future completion on node [idx=" + i + ", nodeId=" + kernal.getLocalNodeId() + ']');

            fut.get();
        }
    }
}
Use of org.apache.ignite.internal.processors.hadoop.Hadoop in project ignite by apache: class HadoopTaskExecutionSelfTest, method testJobKill.
/**
 * Tests job cancellation: killing an unknown job returns {@code false},
 * killing a running job succeeds and cancels all started tasks, and
 * killing the same job a second time returns {@code false} again.
 *
 * @throws Exception If failed.
 */
public void testJobKill() throws Exception {
    Configuration cfg = prepareJobForCancelling();

    Hadoop hadoop = grid(0).hadoop();

    HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);

    // Killing an unknown job must be a no-op reported as 'false'.
    boolean killRes = hadoop.kill(jobId);

    assertFalse(killRes);

    final IgniteInternalFuture<?> fut = hadoop.submit(jobId, createJobInfo(cfg, null));

    // Wait until the job has produced at least one split, i.e. actually started.
    if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return splitsCount.get() > 0;
        }
    }, 20000)) {
        U.dumpThreads(log);

        // Use fail() with a message instead of assertTrue(false) — consistent
        // with the other timeout branch below and gives a diagnosable failure.
        fail("Timed out waiting for the job to produce splits.");
    }

    // Wait until a task has been executed for every split.
    if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            X.println("___ executed tasks: " + executedTasks.get());

            return executedTasks.get() == splitsCount.get();
        }
    }, 20000)) {
        U.dumpThreads(log);

        fail("Timed out waiting for all tasks to start.");
    }

    // Kill really ran job.
    killRes = hadoop.kill(jobId);

    assertTrue(killRes);

    // After the kill the job future must complete exceptionally.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            fut.get();

            return null;
        }
    }, IgniteCheckedException.class, null);

    // Every started task must have been cancelled.
    assertEquals(executedTasks.get(), cancelledTasks.get());

    // Kill the same job again — must report 'false' since the job is gone.
    killRes = hadoop.kill(jobId);

    assertFalse(killRes);
}
Aggregations