Use of org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext in project ignite by apache.
The class HadoopV1ReduceTask, method run().
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJobEx job = taskCtx.job();

    HadoopV2TaskContext taskCtx0 = (HadoopV2TaskContext)taskCtx;

    if (!reduce && taskCtx.taskInfo().hasMapperIndex())
        HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
    else
        HadoopMapperUtils.clearMapperIndex();

    try {
        JobConf jobConf = taskCtx0.jobConf();

        HadoopTaskInput input = taskCtx.input();

        HadoopV1OutputCollector collector = null;

        try {
            collector = collector(jobConf, taskCtx0, reduce || !job.info().hasReducer(), fileName(), taskCtx0.attemptId());

            Reducer reducer;

            if (reduce)
                reducer = ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf);
            else
                reducer = ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);

            assert reducer != null;

            try {
                try {
                    while (input.next()) {
                        if (isCancelled())
                            throw new HadoopTaskCancelledException("Reduce task cancelled.");

                        reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                    }

                    if (!reduce)
                        taskCtx.onMapperFinished();
                }
                finally {
                    reducer.close();
                }
            }
            finally {
                collector.closeWriter();
            }

            collector.commit();
        }
        catch (Exception e) {
            if (collector != null)
                collector.abort();

            throw new IgniteCheckedException(e);
        }
    }
    finally {
        if (!reduce)
            HadoopMapperUtils.clearMapperIndex();
    }
}
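The loop above drives a plain Hadoop v1 reducer (or, when reduce is false, the job's combiner): the instance is created from jobConf.getReducerClass() or jobConf.getCombinerClass() and called once per key via reducer.reduce(input.key(), input.values(), collector, Reporter.NULL). Below is a minimal sketch of such a user-supplied Reducer; the class name WordCountReducer is hypothetical, only the org.apache.hadoop.mapred interface it implements comes from the snippet.

// Minimal sketch of a user-supplied Reducer driven by the loop above.
// The class name WordCountReducer is hypothetical.
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class WordCountReducer extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {
    /** Sums the counts for one key; invoked once per key from reducer.reduce(...) above. */
    @Override public void reduce(Text key, Iterator<IntWritable> vals,
        OutputCollector<Text, IntWritable> out, Reporter reporter) throws IOException {
        int sum = 0;

        while (vals.hasNext())
            sum += vals.next().get();

        out.collect(key, new IntWritable(sum));
    }
}

Because its input and output types match, the same class could also serve as the combiner that this task runs when reduce is false.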
Use of org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext in project ignite by apache.
The class HadoopV1MapTask, method run().
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJobEx job = taskCtx.job();

    HadoopV2TaskContext taskCtx0 = (HadoopV2TaskContext)taskCtx;

    if (taskCtx.taskInfo().hasMapperIndex())
        HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
    else
        HadoopMapperUtils.clearMapperIndex();

    try {
        JobConf jobConf = taskCtx0.jobConf();

        InputFormat inFormat = jobConf.getInputFormat();

        HadoopInputSplit split = info().inputSplit();

        InputSplit nativeSplit;

        if (split instanceof HadoopFileBlock) {
            HadoopFileBlock block = (HadoopFileBlock)split;

            nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), EMPTY_HOSTS);
        }
        else
            nativeSplit = (InputSplit)taskCtx0.getNativeSplit(split);

        assert nativeSplit != null;

        Reporter reporter = new HadoopV1Reporter(taskCtx);

        HadoopV1OutputCollector collector = null;

        try {
            collector = collector(jobConf, taskCtx0, !job.info().hasCombiner() && !job.info().hasReducer(), fileName(), taskCtx0.attemptId());

            RecordReader reader = inFormat.getRecordReader(nativeSplit, jobConf, reporter);

            Mapper mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);

            Object key = reader.createKey();
            Object val = reader.createValue();

            assert mapper != null;

            try {
                try {
                    while (reader.next(key, val)) {
                        if (isCancelled())
                            throw new HadoopTaskCancelledException("Map task cancelled.");

                        mapper.map(key, val, collector, reporter);
                    }

                    taskCtx.onMapperFinished();
                }
                finally {
                    mapper.close();
                }
            }
            finally {
                collector.closeWriter();
            }

            collector.commit();
        }
        catch (Exception e) {
            if (collector != null)
                collector.abort();

            throw new IgniteCheckedException(e);
        }
    }
    finally {
        HadoopMapperUtils.clearMapperIndex();
    }
}
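Here each record produced by reader.next(key, val) is handed to mapper.map(key, val, collector, reporter), where the mapper is instantiated from jobConf.getMapperClass(). Below is a minimal sketch of such a user-supplied Mapper against the org.apache.hadoop.mapred API; the class name WordCountMapper is hypothetical.

// Minimal sketch of a user-supplied Mapper fed by reader.next(key, val) above.
// The class name WordCountMapper is hypothetical.
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class WordCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {
    /** Emits (word, 1) for every token; invoked once per record by mapper.map(...) above. */
    @Override public void map(LongWritable off, Text line,
        OutputCollector<Text, IntWritable> out, Reporter reporter) throws IOException {
        StringTokenizer tok = new StringTokenizer(line.toString());

        while (tok.hasMoreTokens())
            out.collect(new Text(tok.nextToken()), new IntWritable(1));
    }
}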
Use of org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext in project ignite by apache.
The class HadoopV1SetupTask, method run().
/** {@inheritDoc} */
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;

    try {
        ctx.jobConf().getOutputFormat().checkOutputSpecs(null, ctx.jobConf());

        OutputCommitter committer = ctx.jobConf().getOutputCommitter();

        if (committer != null)
            committer.setupJob(ctx.jobContext());
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}
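The setup task only validates the output specification and lets the job's OutputCommitter prepare the job; both calls operate on the Hadoop v1 JobConf returned by ctx.jobConf(). Below is a sketch of the kind of JobConf a client might submit for this to succeed; the class name WordCountJobConfig and the /input and /output paths are hypothetical, the JobConf and FileOutputFormat calls are standard org.apache.hadoop.mapred API.

// Sketch of the JobConf side that HadoopV1SetupTask later inspects (hypothetical class and paths).
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class WordCountJobConfig {
    /** Builds a JobConf whose output format passes checkOutputSpecs(null, jobConf). */
    public static JobConf create() {
        JobConf jobConf = new JobConf();

        jobConf.setInputFormat(TextInputFormat.class);
        jobConf.setOutputFormat(TextOutputFormat.class); // validated by checkOutputSpecs(null, jobConf)
        jobConf.setOutputKeyClass(Text.class);
        jobConf.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(jobConf, new Path("/input"));   // hypothetical input path
        FileOutputFormat.setOutputPath(jobConf, new Path("/output")); // must not exist yet, or checkOutputSpecs fails

        return jobConf;
    }
}

With a file-based output format like this, jobConf.getOutputCommitter() returns a FileOutputCommitter, and committer.setupJob(ctx.jobContext()) creates the job's temporary output directory.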
Use of org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext in project ignite by apache.
The class HadoopV1CleanupTask, method run().
/** {@inheritDoc} */
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;

    JobContext jobCtx = ctx.jobContext();

    try {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();

        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}
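The cleanup task delegates the job's final outcome to the OutputCommitter configured in the JobConf: commitJob() on success, abortJob(..., JobStatus.State.FAILED) on failure. The sketch below shows a hypothetical custom committer, AuditingOutputCommitter, that extends the default FileOutputCommitter and hooks exactly those two calls; only the org.apache.hadoop.mapred types are taken from the snippet.

// Hypothetical committer whose commitJob()/abortJob() hooks the cleanup task above triggers.
import java.io.IOException;

import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.JobContext;

public class AuditingOutputCommitter extends FileOutputCommitter {
    /** Reached via committer.commitJob(jobCtx) when the job succeeds. */
    @Override public void commitJob(JobContext ctx) throws IOException {
        super.commitJob(ctx); // promote task outputs to the final output directory

        System.out.println("Committed job " + ctx.getJobConf().getJobName());
    }

    /** Reached via committer.abortJob(jobCtx, JobStatus.State.FAILED) when the job fails. */
    @Override public void abortJob(JobContext ctx, int runState) throws IOException {
        super.abortJob(ctx, runState); // remove temporary output

        System.out.println("Aborted job " + ctx.getJobConf().getJobName());
    }
}

A client would register such a committer with jobConf.setOutputCommitter(AuditingOutputCommitter.class); otherwise jobConf.getOutputCommitter() returns the default FileOutputCommitter.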