Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException in project ignite by apache.
The class HadoopV2TaskContext, method run().
/** {@inheritDoc} */
@Override public void run() throws IgniteCheckedException {
    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(jobConf().getClassLoader());

    try {
        try {
            task = createTask();
        }
        catch (Throwable e) {
            if (e instanceof Error)
                throw e;

            throw transformException(e);
        }

        if (cancelled)
            throw new HadoopTaskCancelledException("Task cancelled.");

        try {
            task.run(this);
        }
        catch (Throwable e) {
            if (e instanceof Error)
                throw e;

            throw transformException(e);
        }
    }
    finally {
        task = null;

        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}
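For context, here is a minimal standalone sketch of the class-loader save/restore idiom that HadoopCommonUtils.setContextClassLoader(...) and restoreContextClassLoader(oldLdr) implement around task execution. It uses plain JDK calls only; the class and method names below are illustrative assumptions, not Ignite API.

// Illustrative only: names and structure are assumptions, not the Ignite implementation.
public final class ClassLoaderScope {
    private ClassLoaderScope() {
        // No instances.
    }

    /** Runs {@code action} with {@code ldr} installed as the thread context class loader. */
    public static void runWith(ClassLoader ldr, Runnable action) {
        Thread cur = Thread.currentThread();

        ClassLoader oldLdr = cur.getContextClassLoader();

        cur.setContextClassLoader(ldr);

        try {
            action.run();
        }
        finally {
            // Always restore the previous loader, mirroring the finally block in run() above.
            cur.setContextClassLoader(oldLdr);
        }
    }
}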
Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException in project ignite by apache.
The class HadoopJobTracker, method killJob().
/**
 * Kills job.
 *
 * @param jobId Job ID.
 * @return {@code True} if job was killed.
 * @throws IgniteCheckedException If failed.
 */
public boolean killJob(HadoopJobId jobId) throws IgniteCheckedException {
    if (!busyLock.tryReadLock())
        // Grid is stopping.
        return false;

    try {
        HadoopJobMetadata meta = jobMetaCache().get(jobId);

        if (meta != null && meta.phase() != PHASE_COMPLETE && meta.phase() != PHASE_CANCELLING) {
            HadoopTaskCancelledException err = new HadoopTaskCancelledException("Job cancelled.");

            jobMetaCache().invoke(jobId, new CancelJobProcessor(null, err));
        }
    }
    finally {
        busyLock.readUnlock();
    }

    IgniteInternalFuture<?> fut = finishFuture(jobId);

    if (fut != null) {
        try {
            fut.get();
        }
        catch (Exception e) {
            if (e.getCause() instanceof HadoopTaskCancelledException)
                return true;
        }
    }

    return false;
}
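Note that killJob() reports success only when the job's finish future fails with HadoopTaskCancelledException as its cause; a job that already completed, is already cancelling, or finishes for another reason yields false. Below is a standalone sketch of that "cancellation detected via exception cause" pattern using plain java.util.concurrent instead of the Ignite future API; all names in it are illustrative.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

// Illustrative only: stands in for the Ignite future and exception types.
public class CancelledCauseCheck {
    /** Marker standing in for HadoopTaskCancelledException. */
    static class TaskCancelledException extends RuntimeException {
        TaskCancelledException(String msg) {
            super(msg);
        }
    }

    /** @return {@code true} only if the finish future failed because of cancellation. */
    static boolean waitAndCheckCancelled(CompletableFuture<?> finishFut) {
        try {
            finishFut.get();
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        catch (ExecutionException e) {
            if (e.getCause() instanceof TaskCancelledException)
                return true; // The job terminated because it was cancelled.
        }

        return false; // Completed normally or failed for another reason.
    }

    public static void main(String[] args) {
        CompletableFuture<Void> fut = new CompletableFuture<>();

        fut.completeExceptionally(new TaskCancelledException("Job cancelled."));

        System.out.println(waitAndCheckCancelled(fut)); // Prints: true
    }
}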
Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException in project ignite by apache.
The class HadoopRunnableTask, method runTask().
/**
 * @param perfCntr Performance counter.
 * @throws IgniteCheckedException If failed.
 */
private void runTask(HadoopPerformanceCounter perfCntr) throws IgniteCheckedException {
    if (cancelled)
        throw new HadoopTaskCancelledException("Task cancelled.");

    try (HadoopTaskOutput out = createOutputInternal(ctx);
         HadoopTaskInput in = createInputInternal(ctx)) {
        ctx.input(in);
        ctx.output(out);

        perfCntr.onTaskStart(ctx.taskInfo(), U.currentTimeMillis());

        ctx.run();
    }
}
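The try-with-resources block guarantees that both the task output and input are closed, in reverse declaration order, even when ctx.run() throws (for example a cancellation exception). A standalone sketch of that shape, with illustrative names only:

// Illustrative only: demonstrates the try-with-resources shape used in runTask().
public class TwoResourceClose {
    static class Resource implements AutoCloseable {
        private final String name;

        Resource(String name) {
            this.name = name;

            System.out.println("open " + name);
        }

        @Override public void close() {
            System.out.println("close " + name);
        }
    }

    public static void main(String[] args) {
        try (Resource out = new Resource("output");
             Resource in = new Resource("input")) {
            // Stands in for ctx.run() throwing, e.g. a cancellation exception.
            throw new RuntimeException("Task cancelled.");
        }
        catch (RuntimeException e) {
            // Output order: open output, open input, close input, close output, caught: ...
            System.out.println("caught: " + e.getMessage());
        }
    }
}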
Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException in project ignite by apache.
The class HadoopRunnableTask, method call0().
/**
 * Implements actual task running.
 * @throws IgniteCheckedException On error.
 */
void call0() throws IgniteCheckedException {
    execStartTs = U.currentTimeMillis();

    Throwable err = null;

    HadoopTaskState state = HadoopTaskState.COMPLETED;

    HadoopPerformanceCounter perfCntr = null;

    try {
        perfCntr = HadoopPerformanceCounter.getCounter(ctx.counters(), nodeId);

        perfCntr.onTaskSubmit(info, submitTs);
        perfCntr.onTaskPrepare(info, execStartTs);

        ctx.prepareTaskEnvironment();

        runTask(perfCntr);

        if (info.type() == MAP && job.info().hasCombiner()) {
            // Switch to combiner.
            HadoopTaskInfo combineTaskInfo = new HadoopTaskInfo(COMBINE, info.jobId(), info.taskNumber(),
                info.attempt(), null);

            // Mapper and combiner share the same index.
            if (ctx.taskInfo().hasMapperIndex())
                combineTaskInfo.mapperIndex(ctx.taskInfo().mapperIndex());

            ctx.taskInfo(combineTaskInfo);

            try {
                runTask(perfCntr);
            }
            finally {
                ctx.taskInfo(info);
            }
        }
    }
    catch (HadoopTaskCancelledException ignored) {
        state = HadoopTaskState.CANCELED;
    }
    catch (Throwable e) {
        state = HadoopTaskState.FAILED;

        err = e;

        U.error(log, "Task execution failed.", e);

        if (e instanceof Error)
            throw e;
    }
    finally {
        execEndTs = U.currentTimeMillis();

        if (perfCntr != null)
            perfCntr.onTaskFinish(info, execEndTs);

        onTaskFinished(new HadoopTaskStatus(state, err, ctx == null ? null : ctx.counters()));

        if (combinerInput != null)
            combinerInput.close();

        if (ctx != null)
            ctx.cleanupTaskEnvironment();
    }
}
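The relevant part of call0() for this exception is the catch ordering: HadoopTaskCancelledException is swallowed and mapped to the CANCELED state with no error recorded, any other Throwable maps to FAILED with the error attached, and Errors are additionally rethrown. A standalone sketch of that mapping, with illustrative names only:

// Illustrative only: mirrors the status mapping in call0() without the Ignite types.
public class TaskStateMapping {
    enum State {
        COMPLETED, CANCELED, FAILED
    }

    /** Marker standing in for HadoopTaskCancelledException. */
    static class TaskCancelledException extends RuntimeException {
    }

    static final class Status {
        final State state;
        final Throwable err;

        Status(State state, Throwable err) {
            this.state = state;
            this.err = err;
        }
    }

    static Status runAndReport(Runnable body) {
        State state = State.COMPLETED;
        Throwable err = null;

        try {
            body.run();
        }
        catch (TaskCancelledException ignored) {
            // Cancellation is an expected outcome, not a failure: no error is recorded.
            state = State.CANCELED;
        }
        catch (Throwable e) {
            state = State.FAILED;
            err = e;

            if (e instanceof Error)
                throw (Error)e; // Errors are propagated after being recorded, as in call0().
        }

        return new Status(state, err);
    }

    public static void main(String[] args) {
        System.out.println(runAndReport(() -> { }).state);                                      // COMPLETED
        System.out.println(runAndReport(() -> { throw new TaskCancelledException(); }).state);  // CANCELED
        System.out.println(runAndReport(() -> { throw new IllegalStateException(); }).state);   // FAILED
    }
}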
Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException in project ignite by apache.
The class HadoopV1ReduceTask, method run().
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJobEx job = taskCtx.job();

    HadoopV2TaskContext taskCtx0 = (HadoopV2TaskContext) taskCtx;

    if (!reduce && taskCtx.taskInfo().hasMapperIndex())
        HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
    else
        HadoopMapperUtils.clearMapperIndex();

    try {
        JobConf jobConf = taskCtx0.jobConf();

        HadoopTaskInput input = taskCtx.input();

        HadoopV1OutputCollector collector = null;

        try {
            collector = collector(jobConf, taskCtx0, reduce || !job.info().hasReducer(), fileName(),
                taskCtx0.attemptId());

            Reducer reducer;

            if (reduce)
                reducer = ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf);
            else
                reducer = ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);

            assert reducer != null;

            try {
                try {
                    while (input.next()) {
                        if (isCancelled())
                            throw new HadoopTaskCancelledException("Reduce task cancelled.");

                        reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                    }

                    if (!reduce)
                        taskCtx.onMapperFinished();
                }
                finally {
                    reducer.close();
                }
            }
            finally {
                collector.closeWriter();
            }

            collector.commit();
        }
        catch (Exception e) {
            if (collector != null)
                collector.abort();

            throw new IgniteCheckedException(e);
        }
    }
    finally {
        if (!reduce)
            HadoopMapperUtils.clearMapperIndex();
    }
}
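The reduce loop above follows a commit/abort discipline: output is committed only after the input is fully consumed, and any exception, including the cancellation check inside the loop, causes the collector to abort before the error propagates. A standalone sketch of that discipline, with illustrative names only (not the Ignite or Hadoop API):

import java.util.List;
import java.util.function.BooleanSupplier;

// Illustrative only: not the Ignite or Hadoop API.
public class CommitAbortLoop {
    interface Collector {
        void collect(String rec);

        void commit();

        void abort();
    }

    /** Marker standing in for HadoopTaskCancelledException. */
    static class TaskCancelledException extends RuntimeException {
        TaskCancelledException(String msg) {
            super(msg);
        }
    }

    /** Processes all records, committing on success and aborting on any failure. */
    static void process(Iterable<String> input, Collector collector, BooleanSupplier cancelled) {
        try {
            for (String rec : input) {
                if (cancelled.getAsBoolean())
                    throw new TaskCancelledException("Reduce task cancelled.");

                collector.collect(rec);
            }

            collector.commit();
        }
        catch (RuntimeException e) {
            collector.abort();

            throw e;
        }
    }

    public static void main(String[] args) {
        Collector printing = new Collector() {
            @Override public void collect(String rec) {
                System.out.println("collect " + rec);
            }

            @Override public void commit() {
                System.out.println("commit");
            }

            @Override public void abort() {
                System.out.println("abort");
            }
        };

        process(List.of("a", "b"), printing, () -> false); // collect a, collect b, commit

        try {
            process(List.of("a", "b"), printing, () -> true); // abort, then the exception propagates
        }
        catch (TaskCancelledException e) {
            System.out.println("cancelled: " + e.getMessage());
        }
    }
}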