Use of org.apache.hadoop.mapred.JobContext in project flink by apache.
The class HadoopUtils, method instantiateJobContext.
public static JobContext instantiateJobContext(JobConf jobConf, JobID jobId) throws Exception {
    try {
        Class<?> clazz;
        if (!TaskAttemptContext.class.isInterface()) {
            // for Hadoop 1.x, where JobContext is a concrete class
            clazz = Class.forName("org.apache.hadoop.mapred.JobContext", true, Thread.currentThread().getContextClassLoader());
        } else {
            // for Hadoop 2.x, where JobContext is an interface implemented by JobContextImpl
            clazz = Class.forName("org.apache.hadoop.mapred.JobContextImpl", true, Thread.currentThread().getContextClassLoader());
        }
        Constructor<?> constructor = clazz.getDeclaredConstructor(JobConf.class, org.apache.hadoop.mapreduce.JobID.class);
        // the constructor is not public, so it must be made accessible
        constructor.setAccessible(true);
        JobContext context = (JobContext) constructor.newInstance(jobConf, jobId);
        return context;
    } catch (Exception e) {
        throw new Exception("Could not create instance of JobContext.", e);
    }
}
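The reflection is needed because org.apache.hadoop.mapred.JobContext is a concrete class in Hadoop 1.x but an interface in Hadoop 2.x (implemented by JobContextImpl), so no single direct constructor call compiles against both classpaths. A minimal usage sketch, assuming the HadoopUtils class above is on the classpath (the demo class is hypothetical, and the import path assumes Flink's flink-hadoop-compatibility layout):

import org.apache.flink.api.java.hadoop.mapred.utils.HadoopUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapreduce.JobID;

public class JobContextDemo {
    public static void main(String[] args) throws Exception {
        JobConf jobConf = new JobConf();
        // Resolves to mapred.JobContext on Hadoop 1.x and to JobContextImpl on 2.x.
        JobContext context = HadoopUtils.instantiateJobContext(jobConf, new JobID());
        System.out.println(context.getJobID());
    }
}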
Use of org.apache.hadoop.mapred.JobContext in project flink by apache.
The class HadoopOutputFormatBase, method open.
/**
 * Create the temporary output file for the Hadoop RecordWriter.
 * @param taskNumber The number of the parallel instance.
 * @param numTasks The number of parallel tasks.
 * @throws java.io.IOException
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    // enforce sequential open() calls
    synchronized (OPEN_MUTEX) {
        if (Integer.toString(taskNumber + 1).length() > 6) {
            throw new IOException("Task id too large.");
        }
        // build an attempt ID of the form "attempt__0000_r_<task number, zero-padded to 6 digits>_0"
        TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
            + String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ").replace(" ", "0")
            + Integer.toString(taskNumber + 1)
            + "_0");

        this.jobConf.set("mapred.task.id", taskAttemptID.toString());
        this.jobConf.setInt("mapred.task.partition", taskNumber + 1);
        // for Hadoop 2.2
        this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString());
        this.jobConf.setInt("mapreduce.task.partition", taskNumber + 1);

        try {
            this.context = HadoopUtils.instantiateTaskAttemptContext(this.jobConf, taskAttemptID);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        this.outputCommitter = this.jobConf.getOutputCommitter();

        JobContext jobContext;
        try {
            jobContext = HadoopUtils.instantiateJobContext(this.jobConf, new JobID());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        this.outputCommitter.setupJob(jobContext);

        this.recordWriter = this.mapredOutputFormat.getRecordWriter(null, this.jobConf, Integer.toString(taskNumber + 1), new HadoopDummyProgressable());
    }
}
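The String.format/replace chain above simply left-pads the 1-based task number with zeros to the fixed six-digit width that Hadoop attempt strings use. A short sketch of the same ID construction with a plain numeric format specifier (the demo class is hypothetical):

import org.apache.hadoop.mapred.TaskAttemptID;

public class TaskAttemptIdDemo {
    public static void main(String[] args) {
        int taskNumber = 6; // the 0-based parallel instance index
        // "%06d" yields the same zero-padding as the format/replace chain in open()
        String id = "attempt__0000_r_" + String.format("%06d", taskNumber + 1) + "_0";
        TaskAttemptID taskAttemptID = TaskAttemptID.forName(id);
        System.out.println(taskAttemptID); // attempt__0000_r_000007_0
    }
}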
Use of org.apache.hadoop.mapred.JobContext in project flink by apache.
The class HadoopOutputFormatBase, method finalizeGlobal.
@Override
public void finalizeGlobal(int parallelism) throws IOException {
    try {
        JobContext jobContext = HadoopUtils.instantiateJobContext(this.jobConf, new JobID());
        OutputCommitter outputCommitter = this.jobConf.getOutputCommitter();
        // finalize HDFS output format
        outputCommitter.commitJob(jobContext);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
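Together with open() above, this completes the OutputCommitter handshake: each open() runs setupJob() before creating its RecordWriter (serialized by OPEN_MUTEX), and finalizeGlobal() calls commitJob() exactly once after all parallel instances finish, which is what promotes the temporary task files to the final output location. A minimal sketch of that ordering in a single JVM (hypothetical demo class; the output path is chosen for illustration):

import org.apache.flink.api.java.hadoop.mapred.utils.HadoopUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapreduce.JobID;

public class CommitterLifecycleDemo {
    public static void main(String[] args) throws Exception {
        JobConf jobConf = new JobConf();
        FileOutputFormat.setOutputPath(jobConf, new Path("/tmp/committer-demo"));
        JobContext jobContext = HadoopUtils.instantiateJobContext(jobConf, new JobID());
        OutputCommitter committer = jobConf.getOutputCommitter(); // FileOutputCommitter by default

        committer.setupJob(jobContext);   // open(): before any task writes
        // ... tasks write through their RecordWriters and close ...
        committer.commitJob(jobContext);  // finalizeGlobal(): once, after all tasks
    }
}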
Use of org.apache.hadoop.mapred.JobContext in project ignite by apache.
The class HadoopV1CleanupTask, method run.
/** {@inheritDoc} */
@Override
public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext ctx = (HadoopV2TaskContext) taskCtx;
    JobContext jobCtx = ctx.jobContext();
    try {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();
        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    } catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}
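The abort branch is the failure-path mirror of the commit in Flink's finalizeGlobal() above: instead of promoting the job's temporary output, the committer is told to discard it, with the JobStatus.State argument recording why. A small helper sketch of the same decision outside Ignite (hypothetical class; the JobContext would be obtained as in the earlier snippets):

import java.io.IOException;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapreduce.JobStatus;

public class CleanupDemo {
    /** Commit on success, abort on failure, as in HadoopV1CleanupTask.run(). */
    static void cleanup(JobContext jobCtx, boolean abort) throws IOException {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();
        if (abort) {
            // FAILED rather than KILLED tells the committer the job itself failed
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        } else {
            committer.commitJob(jobCtx);
        }
    }
}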