
Example 6 with GenericOptionsParser

use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.

the class ReliabilityTest method run.

public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    if ("local".equals(conf.get(JTConfig.JT_IPC_ADDRESS, "local"))) {
        displayUsage();
    }
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length == 2) {
        if (otherArgs[0].equals("-scratchdir")) {
            dir = otherArgs[1];
        } else {
            displayUsage();
        }
    } else if (otherArgs.length == 0) {
        dir = System.getProperty("user.dir");
    } else {
        displayUsage();
    }
    //to protect against the case of jobs failing even when multiple attempts
    //fail, set some high values for the max attempts
    conf.setInt(JobContext.MAP_MAX_ATTEMPTS, 10);
    conf.setInt(JobContext.REDUCE_MAX_ATTEMPTS, 10);
    runSleepJobTest(new JobClient(new JobConf(conf)), conf);
    runSortJobTests(new JobClient(new JobConf(conf)), conf);
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) GenericOptionsParser(org.apache.hadoop.util.GenericOptionsParser)
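
The shape of this example — let GenericOptionsParser consume the generic Hadoop flags, then hand-check whatever remains — is the usual division of labor. A minimal, self-contained sketch of that split (the class name and sample arguments below are illustrative, not taken from ReliabilityTest):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class RemainingArgsDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Given: -D mapreduce.map.maxattempts=10 -scratchdir /tmp/test
        // the parser folds the -D pair into conf and returns the rest.
        String[] remaining = new GenericOptionsParser(conf, args).getRemainingArgs();
        System.out.println("maxattempts = " + conf.get("mapreduce.map.maxattempts"));
        // Only "-scratchdir" and "/tmp/test" remain for the tool to parse.
        for (String arg : remaining) {
            System.out.println("remaining: " + arg);
        }
    }
}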

Example 7 with GenericOptionsParser

use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.

the class Submitter method run.

@Override
public int run(String[] args) throws Exception {
    CommandLineParser cli = new CommandLineParser();
    if (args.length == 0) {
        cli.printUsage();
        return 1;
    }
    cli.addOption("input", false, "input path to the maps", "path");
    cli.addOption("output", false, "output path from the reduces", "path");
    cli.addOption("jar", false, "job jar file", "path");
    cli.addOption("inputformat", false, "java classname of InputFormat", "class");
    //cli.addArgument("javareader", false, "is the RecordReader in Java");
    cli.addOption("map", false, "java classname of Mapper", "class");
    cli.addOption("partitioner", false, "java classname of Partitioner", "class");
    cli.addOption("reduce", false, "java classname of Reducer", "class");
    cli.addOption("writer", false, "java classname of OutputFormat", "class");
    cli.addOption("program", false, "URI to application executable", "class");
    cli.addOption("reduces", false, "number of reduces", "num");
    cli.addOption("jobconf", false, "\"n1=v1,n2=v2,..\" (Deprecated) Optional. Add or override a JobConf property.", "key=val");
    cli.addOption("lazyOutput", false, "Optional. Create output lazily", "boolean");
    Parser parser = cli.createParser();
    try {
        GenericOptionsParser genericParser = new GenericOptionsParser(getConf(), args);
        CommandLine results = parser.parse(cli.options, genericParser.getRemainingArgs());
        JobConf job = new JobConf(getConf());
        if (results.hasOption("input")) {
            FileInputFormat.setInputPaths(job, results.getOptionValue("input"));
        }
        if (results.hasOption("output")) {
            FileOutputFormat.setOutputPath(job, new Path(results.getOptionValue("output")));
        }
        if (results.hasOption("jar")) {
            job.setJar(results.getOptionValue("jar"));
        }
        if (results.hasOption("inputformat")) {
            setIsJavaRecordReader(job, true);
            job.setInputFormat(getClass(results, "inputformat", job, InputFormat.class));
        }
        if (results.hasOption("javareader")) {
            setIsJavaRecordReader(job, true);
        }
        if (results.hasOption("map")) {
            setIsJavaMapper(job, true);
            job.setMapperClass(getClass(results, "map", job, Mapper.class));
        }
        if (results.hasOption("partitioner")) {
            job.setPartitionerClass(getClass(results, "partitioner", job, Partitioner.class));
        }
        if (results.hasOption("reduce")) {
            setIsJavaReducer(job, true);
            job.setReducerClass(getClass(results, "reduce", job, Reducer.class));
        }
        if (results.hasOption("reduces")) {
            job.setNumReduceTasks(Integer.parseInt(results.getOptionValue("reduces")));
        }
        if (results.hasOption("writer")) {
            setIsJavaRecordWriter(job, true);
            job.setOutputFormat(getClass(results, "writer", job, OutputFormat.class));
        }
        if (results.hasOption("lazyOutput")) {
            if (Boolean.parseBoolean(results.getOptionValue("lazyOutput"))) {
                LazyOutputFormat.setOutputFormatClass(job, job.getOutputFormat().getClass());
            }
        }
        if (results.hasOption("program")) {
            setExecutable(job, results.getOptionValue("program"));
        }
        if (results.hasOption("jobconf")) {
            LOG.warn("-jobconf option is deprecated, please use -D instead.");
            String options = results.getOptionValue("jobconf");
            StringTokenizer tokenizer = new StringTokenizer(options, ",");
            while (tokenizer.hasMoreTokens()) {
                String keyVal = tokenizer.nextToken().trim();
                String[] keyValSplit = keyVal.split("=");
                job.set(keyValSplit[0], keyValSplit[1]);
            }
        }
        // if they gave us a jar file, include it into the class path
        String jarFile = job.getJar();
        if (jarFile != null) {
            final URL[] urls = new URL[] { FileSystem.getLocal(job).pathToFile(new Path(jarFile)).toURL() };
            //FindBugs complains that creating a URLClassLoader should be
            //in a doPrivileged() block. 
            ClassLoader loader = AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {

                public ClassLoader run() {
                    return new URLClassLoader(urls);
                }
            });
            job.setClassLoader(loader);
        }
        runJob(job);
        return 0;
    } catch (ParseException pe) {
        LOG.info("Error : " + pe);
        cli.printUsage();
        return 1;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NullOutputFormat(org.apache.hadoop.mapred.lib.NullOutputFormat) OutputFormat(org.apache.hadoop.mapred.OutputFormat) LazyOutputFormat(org.apache.hadoop.mapred.lib.LazyOutputFormat) FileOutputFormat(org.apache.hadoop.mapred.FileOutputFormat) URL(java.net.URL) GenericOptionsParser(org.apache.hadoop.util.GenericOptionsParser) BasicParser(org.apache.commons.cli.BasicParser) Parser(org.apache.commons.cli.Parser) Mapper(org.apache.hadoop.mapred.Mapper) CommandLine(org.apache.commons.cli.CommandLine) StringTokenizer(java.util.StringTokenizer) InputFormat(org.apache.hadoop.mapred.InputFormat) FileInputFormat(org.apache.hadoop.mapred.FileInputFormat) URLClassLoader(java.net.URLClassLoader) ParseException(org.apache.commons.cli.ParseException) Reducer(org.apache.hadoop.mapred.Reducer) JobConf(org.apache.hadoop.mapred.JobConf) HashPartitioner(org.apache.hadoop.mapred.lib.HashPartitioner) Partitioner(org.apache.hadoop.mapred.Partitioner)
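
One fragile spot worth noting: the -jobconf loop splits each pair with keyVal.split("="), so a value that itself contains '=' is silently truncated, and a token with no '=' at all throws ArrayIndexOutOfBoundsException. A defensive variant of just that loop (a sketch; applyJobconf is a hypothetical helper, not Submitter code):

import java.util.StringTokenizer;
import org.apache.hadoop.mapred.JobConf;

public class JobconfParseDemo {
    static void applyJobconf(JobConf job, String options) {
        StringTokenizer tokenizer = new StringTokenizer(options, ",");
        while (tokenizer.hasMoreTokens()) {
            String keyVal = tokenizer.nextToken().trim();
            // A limit of 2 splits on the first '=' only, keeping any
            // later '=' characters inside the value.
            String[] parts = keyVal.split("=", 2);
            if (parts.length == 2) {
                job.set(parts[0], parts[1]);
            }
        }
    }
}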

Example 8 with GenericOptionsParser

use of org.apache.hadoop.util.GenericOptionsParser in project flink by apache.

the class ParameterTool method fromGenericOptionsParser.

/**
 * Returns a {@link ParameterTool} for the arguments parsed by {@link GenericOptionsParser}.
 *
 * @param args Input argument array; it must be parsable by {@link GenericOptionsParser}
 * @return A {@link ParameterTool}
 * @throws IOException If the arguments cannot be parsed by {@link GenericOptionsParser}
 * @see GenericOptionsParser
 * @deprecated Please use {@link org.apache.flink.hadoopcompatibility.HadoopUtils#paramsFromGenericOptionsParser(String[])}
 * from project flink-hadoop-compatibility
 */
@Deprecated
@PublicEvolving
public static ParameterTool fromGenericOptionsParser(String[] args) throws IOException {
    Option[] options = new GenericOptionsParser(args).getCommandLine().getOptions();
    Map<String, String> map = new HashMap<String, String>();
    for (Option option : options) {
        String[] split = option.getValue().split("=");
        map.put(split[0], split[1]);
    }
    return fromMap(map);
}
Also used : HashMap(java.util.HashMap) Option(org.apache.commons.cli.Option) GenericOptionsParser(org.apache.hadoop.util.GenericOptionsParser) PublicEvolving(org.apache.flink.annotation.PublicEvolving)
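
Note that this helper walks every option the generic parser returns and splits its value on '=', which appears to assume -D property=value pairs; values of other generic flags such as -fs contain no '=' and would not split into a key and value as intended. A hypothetical call site (the argument values are made up for illustration):

String[] args = {"-D", "fs.defaultFS=hdfs://nn:8020"};
ParameterTool params = ParameterTool.fromGenericOptionsParser(args);
// params.get("fs.defaultFS") returns "hdfs://nn:8020"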

Example 9 with GenericOptionsParser

use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.

the class DataNode method instantiateDataNode.

/** Instantiate a single datanode object, along with its secure resources.
 * The datanode must subsequently be started by invoking
 * {@link DataNode#runDatanodeDaemon()}.
 */
public static DataNode instantiateDataNode(String[] args, Configuration conf, SecureResources resources) throws IOException {
    if (conf == null)
        conf = new HdfsConfiguration();
    if (args != null) {
        // parse generic hadoop options
        GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
        args = hParser.getRemainingArgs();
    }
    if (!parseArguments(args, conf)) {
        printUsage(System.err);
        return null;
    }
    Collection<StorageLocation> dataLocations = getStorageLocations(conf);
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY, DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, getHostName(conf));
    return makeInstance(dataLocations, conf, resources);
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) GenericOptionsParser(org.apache.hadoop.util.GenericOptionsParser)
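
The two-stage pattern here — generic flags first, daemon-specific flags second — keeps -D, -fs and friends out of parseArguments entirely. The same idea in miniature (parseAppArguments is a hypothetical stand-in for the DataNode-specific parser):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class TwoStageParseDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Stage 1: generic flags (-D, -fs, -libjars, ...) are absorbed into conf.
        args = new GenericOptionsParser(conf, args).getRemainingArgs();
        // Stage 2: whatever remains belongs to the application.
        if (!parseAppArguments(args)) {
            System.err.println("Usage: demo [-appFlag]");
            System.exit(1);
        }
    }

    // Hypothetical application-specific parser: accept no args or one flag.
    static boolean parseAppArguments(String[] args) {
        return args.length == 0 || (args.length == 1 && args[0].startsWith("-"));
    }
}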

Example 10 with GenericOptionsParser

use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.

the class NameNode method createNameNode.

public static NameNode createNameNode(String[] argv, Configuration conf) throws IOException {
    LOG.info("createNameNode " + Arrays.asList(argv));
    if (conf == null)
        conf = new HdfsConfiguration();
    // Parse out some generic args into Configuration.
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();
    // Parse the rest, NN specific args.
    StartupOption startOpt = parseArguments(argv);
    if (startOpt == null) {
        printUsage(System.err);
        return null;
    }
    setStartupOption(conf, startOpt);
    boolean aborted = false;
    switch(startOpt) {
        case FORMAT:
            aborted = format(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat());
            terminate(aborted ? 1 : 0);
            // avoid javac warning
            return null;
        case GENCLUSTERID:
            System.err.println("Generating new cluster id:");
            System.out.println(NNStorage.newClusterID());
            terminate(0);
            return null;
        case ROLLBACK:
            aborted = doRollback(conf, true);
            terminate(aborted ? 1 : 0);
            // avoid warning
            return null;
        case BOOTSTRAPSTANDBY:
            String[] toolArgs = Arrays.copyOfRange(argv, 1, argv.length);
            int rc = BootstrapStandby.run(toolArgs, conf);
            terminate(rc);
            // avoid warning
            return null;
        case INITIALIZESHAREDEDITS:
            aborted = initializeSharedEdits(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat());
            terminate(aborted ? 1 : 0);
            // avoid warning
            return null;
        case BACKUP:
        case CHECKPOINT:
            NamenodeRole role = startOpt.toNodeRole();
            DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
            return new BackupNode(conf, role);
        case RECOVER:
            NameNode.doRecovery(startOpt, conf);
            return null;
        case METADATAVERSION:
            printMetadataVersion(conf);
            terminate(0);
            // avoid javac warning
            return null;
        case UPGRADEONLY:
            DefaultMetricsSystem.initialize("NameNode");
            new NameNode(conf);
            terminate(0);
            return null;
        default:
            DefaultMetricsSystem.initialize("NameNode");
            return new NameNode(conf);
    }
}
Also used : StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption) RollingUpgradeStartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) GenericOptionsParser(org.apache.hadoop.util.GenericOptionsParser) NamenodeRole(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole)
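
The unreachable-looking return null after each terminate(...) call is deliberate: terminate here is ExitUtil.terminate, which ends the process (or throws ExitException under test), but javac cannot know that, so each arm still needs a return to avoid the fall-through warning and make the method provably return a value. The idiom in isolation (a sketch, not NameNode code):

static void terminate(int status) {
    // Stand-in for org.apache.hadoop.util.ExitUtil.terminate(status).
    System.exit(status);
}

static Object dispatch(int opt) {
    switch (opt) {
        case 0:
            terminate(0);
            // Unreachable at runtime, but required so this arm does not
            // fall through to the next case.
            return null;
        default:
            return new Object();
    }
}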

Aggregations

GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser) 102
Configuration (org.apache.hadoop.conf.Configuration) 72
Path (org.apache.hadoop.fs.Path) 38
Job (org.apache.hadoop.mapreduce.Job) 35
CommandLine (org.apache.commons.cli.CommandLine) 18
IOException (java.io.IOException) 15
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 11
PosixParser (org.apache.commons.cli.PosixParser) 10
FileSystem (org.apache.hadoop.fs.FileSystem) 10
HCatSchema (org.apache.hive.hcatalog.data.schema.HCatSchema) 10
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 9
ParseException (org.apache.commons.cli.ParseException) 7
Test (org.junit.jupiter.api.Test) 7
ArrayList (java.util.ArrayList) 6
Options (org.apache.commons.cli.Options) 6
JobConf (org.apache.hadoop.mapred.JobConf) 6
File (java.io.File) 5
HashMap (java.util.HashMap) 5
YarnUncaughtExceptionHandler (org.apache.hadoop.yarn.YarnUncaughtExceptionHandler) 5
TezConfiguration (org.apache.tez.dag.api.TezConfiguration) 5
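
As the counts above suggest, GenericOptionsParser almost always appears alongside Configuration. The most common way to get that pairing without instantiating the parser yourself is the Tool/ToolRunner idiom, since ToolRunner.run applies GenericOptionsParser before calling run(). A minimal skeleton (MyTool is a made-up class name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class MyTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // getConf() already reflects any -D/-fs/-files flags;
        // args holds only what GenericOptionsParser left over.
        return 0;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));
    }
}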