Example 1 with OutputFormat

Use of org.apache.hadoop.mapreduce.OutputFormat in project hadoop by apache.

From class TestRecovery, the method writeOutput:

private void writeOutput(TaskAttempt attempt, Configuration conf) throws Exception {
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, TypeConverter.fromYarn(attempt.getID()));
    TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat<>();
    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
    NullWritable nullWritable = NullWritable.get();
    try {
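        // Exercise the writer with every mix of concrete, null and NullWritable keys and values.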
        theRecordWriter.write(key1, val1);
        theRecordWriter.write(null, nullWritable);
        theRecordWriter.write(null, val1);
        theRecordWriter.write(nullWritable, val2);
        theRecordWriter.write(key2, nullWritable);
        theRecordWriter.write(key1, null);
        theRecordWriter.write(null, null);
        theRecordWriter.write(key2, val2);
    } finally {
        theRecordWriter.close(tContext);
    }
    OutputFormat outputFormat = ReflectionUtils.newInstance(tContext.getOutputFormatClass(), conf);
    OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
    committer.commitTask(tContext);
}
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), RecordWriter (org.apache.hadoop.mapreduce.RecordWriter), TextOutputFormat (org.apache.hadoop.mapreduce.lib.output.TextOutputFormat), FileOutputFormat (org.apache.hadoop.mapreduce.lib.output.FileOutputFormat), TaskAttemptContextImpl (org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl), TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext), OutputFormat (org.apache.hadoop.mapreduce.OutputFormat), NullWritable (org.apache.hadoop.io.NullWritable)
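
The writeOutput helper above compresses the standard task-side OutputFormat lifecycle. For reference, here is a minimal sketch of the full sequence: setupTask, write, close, then commit. The TaskAttemptContext parameter and the Text key/value types are assumptions for illustration, not part of the test above.

private void writeAndCommit(TaskAttemptContext tContext) throws Exception {
    TextOutputFormat<Text, Text> format = new TextOutputFormat<>();
    OutputCommitter committer = format.getOutputCommitter(tContext);
    // setupTask creates the scratch space for this task attempt.
    committer.setupTask(tContext);
    RecordWriter<Text, Text> writer = format.getRecordWriter(tContext);
    try {
        writer.write(new Text("key"), new Text("value"));
    } finally {
        // Flush and close the attempt's output before committing.
        writer.close(tContext);
    }
    // Promote the attempt's output to the committed task output.
    if (committer.needsTaskCommit(tContext)) {
        committer.commitTask(tContext);
    }
}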

Example 2 with OutputFormat

Use of org.apache.hadoop.mapreduce.OutputFormat in project hadoop by apache.

From class Join, the method run:

/**
   * The main driver for the join program.
   * Invoke this method to submit the map/reduce job.
   * @throws IOException When there are communication problems with the
   *                     job tracker.
   */
@SuppressWarnings("unchecked")
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    JobClient client = new JobClient(conf);
    ClusterStatus cluster = client.getClusterStatus();
    int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
    String join_reduces = conf.get(REDUCES_PER_HOST);
    if (join_reduces != null) {
        num_reduces = cluster.getTaskTrackers() * Integer.parseInt(join_reduces);
    }
    Job job = Job.getInstance(conf);
    job.setJobName("join");
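    // Sort.class lives in the same examples jar as Join, so it still locates the correct job jar.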
    job.setJarByClass(Sort.class);
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    Class<? extends InputFormat> inputFormatClass = SequenceFileInputFormat.class;
    Class<? extends OutputFormat> outputFormatClass = SequenceFileOutputFormat.class;
    Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
    Class<? extends Writable> outputValueClass = TupleWritable.class;
    String op = "inner";
    List<String> otherArgs = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-r".equals(args[i])) {
                num_reduces = Integer.parseInt(args[++i]);
            } else if ("-inFormat".equals(args[i])) {
                inputFormatClass = Class.forName(args[++i]).asSubclass(InputFormat.class);
            } else if ("-outFormat".equals(args[i])) {
                outputFormatClass = Class.forName(args[++i]).asSubclass(OutputFormat.class);
            } else if ("-outKey".equals(args[i])) {
                outputKeyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
            } else if ("-outValue".equals(args[i])) {
                outputValueClass = Class.forName(args[++i]).asSubclass(Writable.class);
            } else if ("-joinOp".equals(args[i])) {
                op = args[++i];
            } else {
                otherArgs.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            // exits
            return printUsage();
        }
    }
    // Set user-supplied (possibly default) job configs
    job.setNumReduceTasks(num_reduces);
    if (otherArgs.size() < 2) {
        System.out.println("ERROR: Wrong number of parameters: ");
        return printUsage();
    }
    FileOutputFormat.setOutputPath(job, new Path(otherArgs.remove(otherArgs.size() - 1)));
    List<Path> plist = new ArrayList<Path>(otherArgs.size());
    for (String s : otherArgs) {
        plist.add(new Path(s));
    }
    job.setInputFormatClass(CompositeInputFormat.class);
    job.getConfiguration().set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose(op, inputFormatClass, plist.toArray(new Path[0])));
    job.setOutputFormatClass(outputFormatClass);
    job.setOutputKeyClass(outputKeyClass);
    job.setOutputValueClass(outputValueClass);
    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    int ret = job.waitForCompletion(true) ? 0 : 1;
    Date end_time = new Date();
    System.out.println("Job ended: " + end_time);
    System.out.println("The job took " + (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
    return ret;
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), SequenceFileInputFormat (org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat), SequenceFileOutputFormat (org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat), FileOutputFormat (org.apache.hadoop.mapreduce.lib.output.FileOutputFormat), OutputFormat (org.apache.hadoop.mapreduce.OutputFormat), Writable (org.apache.hadoop.io.Writable), BytesWritable (org.apache.hadoop.io.BytesWritable), TupleWritable (org.apache.hadoop.mapreduce.lib.join.TupleWritable), JobClient (org.apache.hadoop.mapred.JobClient), ClusterStatus (org.apache.hadoop.mapred.ClusterStatus), Job (org.apache.hadoop.mapreduce.Job), ArrayList (java.util.ArrayList), Date (java.util.Date)
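
Drivers of this shape are usually launched through ToolRunner, which strips generic Hadoop options before run() sees the argument list. A minimal sketch of such an entry point, assuming Join implements org.apache.hadoop.util.Tool (the main method is not shown in this example):

public static void main(String[] args) throws Exception {
    // Example invocation: -r 4 -joinOp outer <in1> <in2> <out>
    int res = ToolRunner.run(new Configuration(), new Join(), args);
    System.exit(res);
}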

Example 3 with OutputFormat

Use of org.apache.hadoop.mapreduce.OutputFormat in project ignite by apache.

From class HadoopV2SetupTask, the method run0:

/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override
protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
    try {
        JobContextImpl jobCtx = taskCtx.jobContext();
        OutputFormat outputFormat = getOutputFormat(jobCtx);
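        // Fail fast on an invalid output spec (for FileOutputFormat, e.g. an output directory that already exists).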
        outputFormat.checkOutputSpecs(jobCtx);
        OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());
        if (committer != null)
            committer.setupJob(jobCtx);
    } catch (ClassNotFoundException | IOException e) {
        throw new IgniteCheckedException(e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IgniteInterruptedCheckedException(e);
    }
}
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException), JobContextImpl (org.apache.hadoop.mapred.JobContextImpl), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), OutputFormat (org.apache.hadoop.mapreduce.OutputFormat), IOException (java.io.IOException)
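
The getOutputFormat(jobCtx) helper is not shown in this snippet. A plausible minimal sketch, mirroring the reflective instantiation in Example 1; the body is an assumption, not the actual Ignite implementation:

private OutputFormat<?, ?> getOutputFormat(JobContext jobCtx) throws ClassNotFoundException {
    // Resolve the job's configured OutputFormat class and instantiate it reflectively.
    return ReflectionUtils.newInstance(jobCtx.getOutputFormatClass(), jobCtx.getConfiguration());
}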

Example 4 with OutputFormat

Use of org.apache.hadoop.mapreduce.OutputFormat in project ignite by apache.

From class HadoopV2Task, the method prepareWriter:

/**
     * Puts the writer into the Hadoop context and returns the associated output format instance.
     *
     * @param jobCtx Job context.
     * @return Output format.
     * @throws IgniteCheckedException In case of Grid exception.
     * @throws InterruptedException In case of interrupt.
     */
protected OutputFormat prepareWriter(JobContext jobCtx) throws IgniteCheckedException, InterruptedException {
    try {
        OutputFormat outputFormat = getOutputFormat(jobCtx);
        assert outputFormat != null;
        OutputCommitter outCommitter = outputFormat.getOutputCommitter(hadoopCtx);
        if (outCommitter != null)
            outCommitter.setupTask(hadoopCtx);
        RecordWriter writer = outputFormat.getRecordWriter(hadoopCtx);
        hadoopCtx.writer(writer);
        return outputFormat;
    } catch (IOException | ClassNotFoundException e) {
        throw new IgniteCheckedException(e);
    }
}
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), RecordWriter (org.apache.hadoop.mapreduce.RecordWriter), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), OutputFormat (org.apache.hadoop.mapreduce.OutputFormat), IOException (java.io.IOException)
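
prepareWriter() only sets the task up; the matching teardown closes the writer and lets the committer finalize the attempt. A hedged sketch of that counterpart, where the method name and the hadoopCtx.writer() getter are assumptions mirroring the setter used above:

protected void closeWriter(OutputFormat outputFormat) throws IgniteCheckedException, InterruptedException {
    try {
        RecordWriter writer = hadoopCtx.writer();
        // Flush and close this attempt's output.
        if (writer != null)
            writer.close(hadoopCtx);
        OutputCommitter committer = outputFormat.getOutputCommitter(hadoopCtx);
        // Promote the attempt's output to the committed task output.
        if (committer != null && committer.needsTaskCommit(hadoopCtx))
            committer.commitTask(hadoopCtx);
    } catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}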

Example 5 with OutputFormat

Use of org.apache.hadoop.mapreduce.OutputFormat in project cdap by caskdata.

From class MultipleOutputsMainOutputWrapper, the method getRootOutputFormat:

// The root OutputFormat is used only for writing, not for checking output specs or committing the output,
// because the root is also among the delegates, which check the output spec and commit the output.
private OutputFormat<K, V> getRootOutputFormat(JobContext context) {
    if (innerFormat == null) {
        Configuration conf = context.getConfiguration();
        @SuppressWarnings("unchecked") Class<OutputFormat<K, V>> c = (Class<OutputFormat<K, V>>) conf.getClass(ROOT_OUTPUT_FORMAT, FileOutputFormat.class);
        innerFormat = ReflectionUtils.newInstance(c, conf);
    }
    return innerFormat;
}
Also used: FileOutputFormat (org.apache.hadoop.mapreduce.lib.output.FileOutputFormat), Configuration (org.apache.hadoop.conf.Configuration), OutputFormat (org.apache.hadoop.mapreduce.OutputFormat)
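
For the wrapper to find its delegate, job setup must register the real format under the ROOT_OUTPUT_FORMAT key. A hedged sketch of that wiring, assuming the constant is accessible from job-setup code and using TextOutputFormat as a stand-in for the job's real format:

// Register the real output format for the wrapper to instantiate lazily.
Configuration conf = job.getConfiguration();
conf.setClass(MultipleOutputsMainOutputWrapper.ROOT_OUTPUT_FORMAT, TextOutputFormat.class, OutputFormat.class);
// The wrapper itself becomes the job's nominal output format.
job.setOutputFormatClass(MultipleOutputsMainOutputWrapper.class);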

Aggregations

OutputFormat (org.apache.hadoop.mapreduce.OutputFormat): 9 usages
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 5 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 5 usages
JobContextImpl (org.apache.hadoop.mapred.JobContextImpl): 4 usages
FileOutputFormat (org.apache.hadoop.mapreduce.lib.output.FileOutputFormat): 4 usages
IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException): 4 usages
IOException (java.io.IOException): 3 usages
RecordWriter (org.apache.hadoop.mapreduce.RecordWriter): 3 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
NullWritable (org.apache.hadoop.io.NullWritable): 2 usages
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 2 usages
TextOutputFormat (org.apache.hadoop.mapreduce.lib.output.TextOutputFormat): 2 usages
TaskAttemptContextImpl (org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl): 2 usages
ArrayList (java.util.ArrayList): 1 usage
Date (java.util.Date): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
BytesWritable (org.apache.hadoop.io.BytesWritable): 1 usage
Writable (org.apache.hadoop.io.Writable): 1 usage
ClusterStatus (org.apache.hadoop.mapred.ClusterStatus): 1 usage
JobClient (org.apache.hadoop.mapred.JobClient): 1 usage