Example 1 with Reducer

Use of org.apache.hadoop.mapred.Reducer in project hadoop by apache.

From class MergeManagerImpl, the method combineAndSpill:

private void combineAndSpill(RawKeyValueIterator kvIter, Counters.Counter inCounter) throws IOException {
    JobConf job = jobConf;
    Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
    Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
    Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
    RawComparator<K> comparator = (RawComparator<K>) job.getCombinerKeyGroupingComparator();
    try {
        CombineValuesIterator values = new CombineValuesIterator(kvIter, comparator, keyClass, valClass, job, Reporter.NULL, inCounter);
        // Run the combiner over each key group; combined records go to combineCollector.
        while (values.more()) {
            combiner.reduce(values.getKey(), values, combineCollector, Reporter.NULL);
            values.nextKey();
        }
    } finally {
        combiner.close();
    }
}
Also used: CombineValuesIterator (org.apache.hadoop.mapred.Task.CombineValuesIterator), RawComparator (org.apache.hadoop.io.RawComparator), Reducer (org.apache.hadoop.mapred.Reducer), JobConf (org.apache.hadoop.mapred.JobConf)
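
For reference, the combinerClass instantiated above can be any class that implements the old-API org.apache.hadoop.mapred.Reducer contract and has a no-arg constructor for ReflectionUtils.newInstance. Below is a minimal sketch of such a combiner; the SumCombiner name and the word-count-style summing are illustrative, not taken from the Hadoop source. MapReduceBase supplies the no-op configure() and close() methods.

import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Illustrative combiner: sums all values seen for a key.
public class SumCombiner extends MapReduceBase
        implements Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterator<IntWritable> values,
                       OutputCollector<Text, IntWritable> output,
                       Reporter reporter) throws IOException {
        int sum = 0;
        while (values.hasNext()) {
            sum += values.next().get();
        }
        output.collect(key, new IntWritable(sum));
    }
}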

Example 2 with Reducer

Use of org.apache.hadoop.mapred.Reducer in project hadoop by apache.

From class Submitter, the method run:

@Override
public int run(String[] args) throws Exception {
    CommandLineParser cli = new CommandLineParser();
    if (args.length == 0) {
        cli.printUsage();
        return 1;
    }
    cli.addOption("input", false, "input path to the maps", "path");
    cli.addOption("output", false, "output path from the reduces", "path");
    cli.addOption("jar", false, "job jar file", "path");
    cli.addOption("inputformat", false, "java classname of InputFormat", "class");
    //cli.addArgument("javareader", false, "is the RecordReader in Java");
    cli.addOption("map", false, "java classname of Mapper", "class");
    cli.addOption("partitioner", false, "java classname of Partitioner", "class");
    cli.addOption("reduce", false, "java classname of Reducer", "class");
    cli.addOption("writer", false, "java classname of OutputFormat", "class");
    cli.addOption("program", false, "URI to application executable", "class");
    cli.addOption("reduces", false, "number of reduces", "num");
    cli.addOption("jobconf", false, "\"n1=v1,n2=v2,..\" (Deprecated) Optional. Add or override a JobConf property.", "key=val");
    cli.addOption("lazyOutput", false, "Optional. Create output lazily", "boolean");
    Parser parser = cli.createParser();
    try {
        GenericOptionsParser genericParser = new GenericOptionsParser(getConf(), args);
        CommandLine results = parser.parse(cli.options, genericParser.getRemainingArgs());
        JobConf job = new JobConf(getConf());
        if (results.hasOption("input")) {
            FileInputFormat.setInputPaths(job, results.getOptionValue("input"));
        }
        if (results.hasOption("output")) {
            FileOutputFormat.setOutputPath(job, new Path(results.getOptionValue("output")));
        }
        if (results.hasOption("jar")) {
            job.setJar(results.getOptionValue("jar"));
        }
        if (results.hasOption("inputformat")) {
            setIsJavaRecordReader(job, true);
            job.setInputFormat(getClass(results, "inputformat", job, InputFormat.class));
        }
        if (results.hasOption("javareader")) {
            setIsJavaRecordReader(job, true);
        }
        if (results.hasOption("map")) {
            setIsJavaMapper(job, true);
            job.setMapperClass(getClass(results, "map", job, Mapper.class));
        }
        if (results.hasOption("partitioner")) {
            job.setPartitionerClass(getClass(results, "partitioner", job, Partitioner.class));
        }
        if (results.hasOption("reduce")) {
            setIsJavaReducer(job, true);
            job.setReducerClass(getClass(results, "reduce", job, Reducer.class));
        }
        if (results.hasOption("reduces")) {
            job.setNumReduceTasks(Integer.parseInt(results.getOptionValue("reduces")));
        }
        if (results.hasOption("writer")) {
            setIsJavaRecordWriter(job, true);
            job.setOutputFormat(getClass(results, "writer", job, OutputFormat.class));
        }
        if (results.hasOption("lazyOutput")) {
            if (Boolean.parseBoolean(results.getOptionValue("lazyOutput"))) {
                LazyOutputFormat.setOutputFormatClass(job, job.getOutputFormat().getClass());
            }
        }
        if (results.hasOption("program")) {
            setExecutable(job, results.getOptionValue("program"));
        }
        if (results.hasOption("jobconf")) {
            LOG.warn("-jobconf option is deprecated, please use -D instead.");
            String options = results.getOptionValue("jobconf");
            StringTokenizer tokenizer = new StringTokenizer(options, ",");
            while (tokenizer.hasMoreTokens()) {
                String keyVal = tokenizer.nextToken().trim();
                // Note: split("=") drops everything after a second '=' in the value.
                String[] keyValSplit = keyVal.split("=");
                job.set(keyValSplit[0], keyValSplit[1]);
            }
        }
        // if they gave us a jar file, include it into the class path
        String jarFile = job.getJar();
        if (jarFile != null) {
            final URL[] urls = new URL[] { FileSystem.getLocal(job).pathToFile(new Path(jarFile)).toURL() };
            //FindBugs complains that creating a URLClassLoader should be
            //in a doPrivileged() block. 
            ClassLoader loader = AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {

                public ClassLoader run() {
                    return new URLClassLoader(urls);
                }
            });
            job.setClassLoader(loader);
        }
        runJob(job);
        return 0;
    } catch (ParseException pe) {
        LOG.info("Error : " + pe);
        cli.printUsage();
        return 1;
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NullOutputFormat (org.apache.hadoop.mapred.lib.NullOutputFormat), OutputFormat (org.apache.hadoop.mapred.OutputFormat), LazyOutputFormat (org.apache.hadoop.mapred.lib.LazyOutputFormat), FileOutputFormat (org.apache.hadoop.mapred.FileOutputFormat), URL (java.net.URL), GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser), BasicParser (org.apache.commons.cli.BasicParser), Parser (org.apache.commons.cli.Parser), Mapper (org.apache.hadoop.mapred.Mapper), CommandLine (org.apache.commons.cli.CommandLine), StringTokenizer (java.util.StringTokenizer), InputFormat (org.apache.hadoop.mapred.InputFormat), FileInputFormat (org.apache.hadoop.mapred.FileInputFormat), URLClassLoader (java.net.URLClassLoader), ParseException (org.apache.commons.cli.ParseException), Reducer (org.apache.hadoop.mapred.Reducer), JobConf (org.apache.hadoop.mapred.JobConf), HashPartitioner (org.apache.hadoop.mapred.lib.HashPartitioner), Partitioner (org.apache.hadoop.mapred.Partitioner)
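
This run method follows the standard Tool contract, so it can also be driven programmatically. A hedged sketch follows, assuming this is the Hadoop Pipes submitter (org.apache.hadoop.mapred.pipes.Submitter) with a public no-arg constructor; the input, output, and program paths are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.pipes.Submitter;
import org.apache.hadoop.util.ToolRunner;

public class PipesLauncher {
    public static void main(String[] args) throws Exception {
        // Roughly equivalent to: hadoop pipes -input <in> -output <out> -program <exe>
        String[] submitterArgs = {
            "-input", "/user/demo/in",            // hypothetical input path
            "-output", "/user/demo/out",          // hypothetical output path
            "-program", "hdfs:///apps/wordcount"  // hypothetical URI of the executable
        };
        int exitCode = ToolRunner.run(new Configuration(), new Submitter(), submitterArgs);
        System.exit(exitCode);
    }
}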

Example 3 with Reducer

Use of org.apache.hadoop.mapred.Reducer in project ignite by apache.

From class HadoopV1ReduceTask, the method run:

/**
 * {@inheritDoc}
 */
@SuppressWarnings("unchecked")
@Override
public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJobEx job = taskCtx.job();
    HadoopV2TaskContext taskCtx0 = (HadoopV2TaskContext) taskCtx;
    if (!reduce && taskCtx.taskInfo().hasMapperIndex())
        HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
    else
        HadoopMapperUtils.clearMapperIndex();
    try {
        JobConf jobConf = taskCtx0.jobConf();
        HadoopTaskInput input = taskCtx.input();
        HadoopV1OutputCollector collector = null;
        try {
            collector = collector(jobConf, taskCtx0, reduce || !job.info().hasReducer(), fileName(), taskCtx0.attemptId());
            Reducer reducer;
            if (reduce)
                reducer = ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf);
            else
                reducer = ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);
            assert reducer != null;
            try {
                try {
                    while (input.next()) {
                        if (isCancelled())
                            throw new HadoopTaskCancelledException("Reduce task cancelled.");
                        reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                    }
                    if (!reduce)
                        taskCtx.onMapperFinished();
                } finally {
                    reducer.close();
                }
            } finally {
                collector.closeWriter();
            }
            collector.commit();
        } catch (Exception e) {
            if (collector != null)
                collector.abort();
            throw new IgniteCheckedException(e);
        }
    } finally {
        if (!reduce)
            HadoopMapperUtils.clearMapperIndex();
    }
}
Also used: HadoopTaskInput (org.apache.ignite.internal.processors.hadoop.HadoopTaskInput), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), HadoopJobEx (org.apache.ignite.internal.processors.hadoop.HadoopJobEx), HadoopTaskCancelledException (org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException), Reducer (org.apache.hadoop.mapred.Reducer), JobConf (org.apache.hadoop.mapred.JobConf), HadoopV2TaskContext (org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext)
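
The reducer-or-combiner branch above resolves whatever classes were registered on the JobConf. A minimal configuration sketch, reusing the illustrative SumCombiner from Example 1 (combiners reuse the same old-API Reducer contract):

import org.apache.hadoop.mapred.JobConf;

public class ReduceTaskConfig {
    public static JobConf configure() {
        JobConf jobConf = new JobConf();
        // Read back above via jobConf.getReducerClass() when reduce == true ...
        jobConf.setReducerClass(SumCombiner.class);
        // ... and via jobConf.getCombinerClass() on the combine path.
        jobConf.setCombinerClass(SumCombiner.class);
        return jobConf;
    }
}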

Example 4 with Reducer

Use of org.apache.hadoop.mapred.Reducer in project tez by apache.

From class ReduceProcessor, the method runOldReducer:

void runOldReducer(JobConf job, final MRTaskReporter reporter, KeyValuesReader input, RawComparator comparator, Class keyClass, Class valueClass, final KeyValueWriter output) throws IOException, InterruptedException {
    Reducer reducer = ReflectionUtils.newInstance(job.getReducerClass(), job);
    // make output collector
    OutputCollector collector = new OutputCollector() {

        public void collect(Object key, Object value) throws IOException {
            output.write(key, value);
        }
    };
    // apply reduce function
    try {
        ReduceValuesIterator values = new ReduceValuesIterator(input, reporter, reduceInputValueCounter);
        values.informReduceProgress();
        while (values.more()) {
            reduceInputKeyCounter.increment(1);
            reducer.reduce(values.getKey(), values, collector, reporter);
            values.informReduceProgress();
        }
        // Set progress to 1.0f if there was no exception.
        reporter.setProgress(1.0f);
        // Clean up: repeated in catch block below
        reducer.close();
    // End of clean up.
    } catch (IOException ioe) {
        try {
            reducer.close();
        } catch (IOException ignored) {
        }
        throw ioe;
    }
}
Also used: OutputCollector (org.apache.hadoop.mapred.OutputCollector), IOException (java.io.IOException), Reducer (org.apache.hadoop.mapred.Reducer)
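
The anonymous OutputCollector pattern above also makes old-API reducers easy to exercise outside a cluster. A small smoke-test sketch along the same lines, again using the illustrative SumCombiner from Example 1:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class ReducerSmokeTest {
    public static void main(String[] args) throws IOException {
        SumCombiner reducer = new SumCombiner();
        // Collector that prints to stdout instead of writing task output.
        OutputCollector<Text, IntWritable> collector = new OutputCollector<Text, IntWritable>() {
            @Override
            public void collect(Text key, IntWritable value) {
                System.out.println(key + "\t" + value);
            }
        };
        reducer.reduce(new Text("hadoop"),
                Arrays.asList(new IntWritable(1), new IntWritable(2)).iterator(),
                collector, Reporter.NULL);
        // Prints "hadoop\t3".
        reducer.close();
    }
}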

Example 5 with Reducer

Use of org.apache.hadoop.mapred.Reducer in project tez by apache.

From class MRCombiner, the method runOldCombiner:

// /////////////// Methods for old API //////////////////////
private void runOldCombiner(final TezRawKeyValueIterator rawIter, final Writer writer) throws IOException {
    Class<? extends Reducer> reducerClazz = (Class<? extends Reducer>) conf.getClass("mapred.combiner.class", null, Reducer.class);
    Reducer combiner = ReflectionUtils.newInstance(reducerClazz, conf);
    OutputCollector collector = new OutputCollector() {

        @Override
        public void collect(Object key, Object value) throws IOException {
            writer.append(key, value);
            combineOutputRecordsCounter.increment(1);
        }
    };
    CombinerValuesIterator values = new CombinerValuesIterator(rawIter, keyClass, valClass, comparator);
    while (values.moveToNext()) {
        combiner.reduce(values.getKey(), values.getValues().iterator(), collector, reporter);
    }
}
Also used: OutputCollector (org.apache.hadoop.mapred.OutputCollector), Reducer (org.apache.hadoop.mapred.Reducer), WrappedReducer (org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer)
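
The combiner class here is looked up from the configuration key mapred.combiner.class, so the producing side has to register it under exactly that name. A brief sketch under the same assumptions as the earlier examples (the illustrative SumCombiner from Example 1):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.Reducer;

public class CombinerConfig {
    public static Configuration withCombiner() {
        Configuration conf = new Configuration();
        // Matches the conf.getClass("mapred.combiner.class", null, Reducer.class) lookup above.
        conf.setClass("mapred.combiner.class", SumCombiner.class, Reducer.class);
        return conf;
    }
}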

Aggregations

Reducer (org.apache.hadoop.mapred.Reducer): 9 uses
JobConf (org.apache.hadoop.mapred.JobConf): 5 uses
OutputCollector (org.apache.hadoop.mapred.OutputCollector): 2 uses
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 2 uses
WrappedReducer (org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer): 2 uses
IOException (java.io.IOException): 1 use
URL (java.net.URL): 1 use
URLClassLoader (java.net.URLClassLoader): 1 use
StringTokenizer (java.util.StringTokenizer): 1 use
BasicParser (org.apache.commons.cli.BasicParser): 1 use
CommandLine (org.apache.commons.cli.CommandLine): 1 use
ParseException (org.apache.commons.cli.ParseException): 1 use
Parser (org.apache.commons.cli.Parser): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
RawComparator (org.apache.hadoop.io.RawComparator): 1 use
FileInputFormat (org.apache.hadoop.mapred.FileInputFormat): 1 use
FileOutputFormat (org.apache.hadoop.mapred.FileOutputFormat): 1 use
InputFormat (org.apache.hadoop.mapred.InputFormat): 1 use
Mapper (org.apache.hadoop.mapred.Mapper): 1 use
OutputFormat (org.apache.hadoop.mapred.OutputFormat): 1 use