use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.
the class ResourceManager method main.
public static void main(String[] argv) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
  try {
    Configuration conf = new YarnConfiguration();
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();
    // If -format-state-store, then delete RMStateStore; else start up normally
    if (argv.length >= 1) {
      if (argv[0].equals("-format-state-store")) {
        deleteRMStateStore(conf);
      } else if (argv[0].equals("-remove-application-from-state-store") && argv.length == 2) {
        removeApplication(conf, argv[1]);
      } else {
        printUsage(System.err);
      }
    } else {
      ResourceManager resourceManager = new ResourceManager();
      ShutdownHookManager.get().addShutdownHook(
          new CompositeServiceShutdownHook(resourceManager), SHUTDOWN_HOOK_PRIORITY);
      resourceManager.init(conf);
      resourceManager.start();
    }
  } catch (Throwable t) {
    LOG.fatal("Error starting ResourceManager", t);
    System.exit(-1);
  }
}
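For context: GenericOptionsParser consumes the generic Hadoop options (-conf, -D, -fs, -jt, -files, -libjars, -archives), applies them to the passed-in Configuration, and returns whatever is left, so by the time the ResourceManager inspects argv[0] only its own flags such as -format-state-store remain. A minimal, self-contained sketch of that split (the class name ParserDemo and the sample arguments are ours, not from the Hadoop source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class ParserDemo {
  public static void main(String[] argv) throws Exception {
    // Hypothetical invocation: -D is a generic option, the rest is app-specific.
    argv = new String[] { "-D", "yarn.resourcemanager.ha.enabled=false", "-format-state-store" };
    Configuration conf = new Configuration();
    GenericOptionsParser parser = new GenericOptionsParser(conf, argv);
    String[] remaining = parser.getRemainingArgs();
    // The -D pair was absorbed into conf; only the application flag is left.
    System.out.println(conf.get("yarn.resourcemanager.ha.enabled")); // prints "false"
    System.out.println(remaining[0]); // prints "-format-state-store"
  }
}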
use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.
the class ValueAggregatorJob method createValueAggregatorJob.
/**
* Create an Aggregate based map/reduce job.
*
* @param args the arguments used for job creation. Generic hadoop
* arguments are accepted.
* @param caller the caller class.
* @return a JobConf object ready for submission.
*
* @throws IOException
* @see GenericOptionsParser
*/
@SuppressWarnings("rawtypes")
public static JobConf createValueAggregatorJob(String[] args, Class<?> caller)
    throws IOException {
  Configuration conf = new Configuration();
  GenericOptionsParser genericParser = new GenericOptionsParser(conf, args);
  args = genericParser.getRemainingArgs();
  if (args.length < 2) {
    System.out.println("usage: inputDirs outDir "
        + "[numOfReducer [textinputformat|seq [specfile [jobName]]]]");
    GenericOptionsParser.printGenericCommandUsage(System.out);
    System.exit(1);
  }
  String inputDir = args[0];
  String outputDir = args[1];
  int numOfReducers = 1;
  if (args.length > 2) {
    numOfReducers = Integer.parseInt(args[2]);
  }
  // Default to SequenceFileInputFormat unless "textinputformat" is requested.
  Class<? extends InputFormat> theInputFormat = null;
  if (args.length > 3 && args[3].compareToIgnoreCase("textinputformat") == 0) {
    theInputFormat = TextInputFormat.class;
  } else {
    theInputFormat = SequenceFileInputFormat.class;
  }
  Path specFile = null;
  if (args.length > 4) {
    specFile = new Path(args[4]);
  }
  String jobName = "";
  if (args.length > 5) {
    jobName = args[5];
  }
  JobConf theJob = new JobConf(conf);
  if (specFile != null) {
    theJob.addResource(specFile);
  }
  String userJarFile = theJob.get("user.jar.file");
  if (userJarFile == null) {
    theJob.setJarByClass(caller != null ? caller : ValueAggregatorJob.class);
  } else {
    theJob.setJar(userJarFile);
  }
  theJob.setJobName("ValueAggregatorJob: " + jobName);
  FileInputFormat.addInputPaths(theJob, inputDir);
  theJob.setInputFormat(theInputFormat);
  theJob.setMapperClass(ValueAggregatorMapper.class);
  FileOutputFormat.setOutputPath(theJob, new Path(outputDir));
  theJob.setOutputFormat(TextOutputFormat.class);
  theJob.setMapOutputKeyClass(Text.class);
  theJob.setMapOutputValueClass(Text.class);
  theJob.setOutputKeyClass(Text.class);
  theJob.setOutputValueClass(Text.class);
  theJob.setReducerClass(ValueAggregatorReducer.class);
  theJob.setCombinerClass(ValueAggregatorCombiner.class);
  theJob.setNumMapTasks(1);
  theJob.setNumReduceTasks(numOfReducers);
  return theJob;
}
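A driver for this old-API factory might look like the following; the class name AggregateDriver is hypothetical, but JobClient.runJob is the standard blocking submission call for JobConf-based jobs:

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;

public class AggregateDriver {
  public static void main(String[] args) throws IOException {
    // Generic options (-D, -files, ...) are handled inside the factory method.
    JobConf job = ValueAggregatorJob.createValueAggregatorJob(args, AggregateDriver.class);
    // Submit the job and block, printing progress, until it finishes.
    JobClient.runJob(job);
  }
}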
use of org.apache.hadoop.util.GenericOptionsParser in project Cloud9 by lintool.
the class ClueWebAnchorTextForwardIndexHttpServer method main.
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length != 3) {
    System.out.println("usage: [index-file] [docno-mapping-data-files] [clue-forward-index-root]");
    // e.g., clue-forward-index-root: /shared/ClueWeb09/collection.compressed.block/
    System.exit(-1);
  }
  String indexFile = otherArgs[0];
  String mappingFile = otherArgs[1];
  String clueIndexRoot = otherArgs[2].endsWith("/") ? otherArgs[2] : otherArgs[2] + "/";
  // Build the list of forward-index data files: findex.en.01.dat through findex.en.10.dat.
  String cluewebForwardIndex = "";
  for (int i = 1; i < 10; i++) {
    cluewebForwardIndex += clueIndexRoot + "findex.en.0" + i + ".dat" + SEPARATOR + " ";
  }
  cluewebForwardIndex += clueIndexRoot + "findex.en.10.dat";
  LOG.info("Launching DocumentForwardIndexHttpServer");
  LOG.info(" - index file: " + indexFile);
  LOG.info(" - docno mapping data file: " + mappingFile);
  LOG.info(" - ClueWeb09 index root: " + clueIndexRoot);
  FileSystem fs = FileSystem.get(conf);
  Random rand = new Random();
  int r = rand.nextInt();
  // Use this tmp file as a rendezvous point: the server-side mapper writes
  // its host name here once it is up.
  Path tmpPath = new Path("/tmp/" + r);
  if (fs.exists(tmpPath)) {
    fs.delete(tmpPath, true);
  }
  JobConf job = new JobConf(conf, ClueWebAnchorTextForwardIndexHttpServer.class);
  job.setJobName("ForwardIndexServer:" + indexFile);
  job.set("mapred.child.java.opts", "-Xmx2048m");
  job.setNumMapTasks(1);
  job.setNumReduceTasks(0);
  job.setInputFormat(NullInputFormat.class);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ServerMapper.class);
  job.set("IndexFile", indexFile);
  job.set("DocnoMappingDataFile", mappingFile);
  job.set("TmpPath", tmpPath.toString());
  job.set("ClueWebIndexFiles", cluewebForwardIndex);
  JobClient client = new JobClient(job);
  client.submitJob(job);
  LOG.info("Waiting for server to start up...");
  // Poll the rendezvous file every 50 seconds until the server announces itself.
  while (!fs.exists(tmpPath)) {
    Thread.sleep(50000);
    LOG.info("...");
  }
  FSDataInputStream in = fs.open(tmpPath);
  String host = in.readUTF();
  in.close();
  LOG.info("host: " + host);
  LOG.info("port: 8888");
}
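The driver above only reads the rendezvous file; the ServerMapper (not shown) is presumably what writes it once its embedded HTTP server is listening. A plausible sketch of that handshake, with every name assumed rather than taken from the Cloud9 source:

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RendezvousWriter {
  // Called from the mapper after the server binds its port (assumption).
  public static void announce(Configuration conf) throws Exception {
    Path tmpPath = new Path(conf.get("TmpPath")); // set by the driver above
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.create(tmpPath);
    // Must mirror the driver's in.readUTF() call.
    out.writeUTF(InetAddress.getLocalHost().getHostName());
    out.close();
  }
}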
use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.
the class ValueAggregatorJob method createValueAggregatorJob.
/**
* Create an Aggregate based map/reduce job.
*
* @param conf the configuration for the job
* @param args the arguments used for job creation. Generic hadoop
* arguments are accepted.
* @return a Job object ready for submission.
*
* @throws IOException
* @see GenericOptionsParser
*/
public static Job createValueAggregatorJob(Configuration conf, String[] args)
    throws IOException {
  GenericOptionsParser genericParser = new GenericOptionsParser(conf, args);
  args = genericParser.getRemainingArgs();
  if (args.length < 2) {
    System.out.println("usage: inputDirs outDir "
        + "[numOfReducer [textinputformat|seq [specfile [jobName]]]]");
    GenericOptionsParser.printGenericCommandUsage(System.out);
    System.exit(2);
  }
  String inputDir = args[0];
  String outputDir = args[1];
  int numOfReducers = 1;
  if (args.length > 2) {
    numOfReducers = Integer.parseInt(args[2]);
  }
  Class<? extends InputFormat> theInputFormat = null;
  if (args.length > 3 && args[3].compareToIgnoreCase("textinputformat") == 0) {
    theInputFormat = TextInputFormat.class;
  } else {
    theInputFormat = SequenceFileInputFormat.class;
  }
  Path specFile = null;
  if (args.length > 4) {
    specFile = new Path(args[4]);
  }
  String jobName = "";
  if (args.length > 5) {
    jobName = args[5];
  }
  if (specFile != null) {
    conf.addResource(specFile);
  }
  String userJarFile = conf.get(ValueAggregatorJobBase.USER_JAR);
  if (userJarFile != null) {
    conf.set(MRJobConfig.JAR, userJarFile);
  }
  Job theJob = Job.getInstance(conf);
  if (userJarFile == null) {
    theJob.setJarByClass(ValueAggregator.class);
  }
  theJob.setJobName("ValueAggregatorJob: " + jobName);
  FileInputFormat.addInputPaths(theJob, inputDir);
  theJob.setInputFormatClass(theInputFormat);
  theJob.setMapperClass(ValueAggregatorMapper.class);
  FileOutputFormat.setOutputPath(theJob, new Path(outputDir));
  theJob.setOutputFormatClass(TextOutputFormat.class);
  theJob.setMapOutputKeyClass(Text.class);
  theJob.setMapOutputValueClass(Text.class);
  theJob.setOutputKeyClass(Text.class);
  theJob.setOutputValueClass(Text.class);
  theJob.setReducerClass(ValueAggregatorReducer.class);
  theJob.setCombinerClass(ValueAggregatorCombiner.class);
  theJob.setNumReduceTasks(numOfReducers);
  return theJob;
}
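A matching new-API driver might be as small as the following sketch (NewApiDriver is our name; the factory and Job come straight from the snippet above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJob;

public class NewApiDriver {
  public static void main(String[] args) throws Exception {
    Job job = ValueAggregatorJob.createValueAggregatorJob(new Configuration(), args);
    // waitForCompletion(true) streams progress to the console and blocks.
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}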
use of org.apache.hadoop.util.GenericOptionsParser in project hadoop by apache.
the class WordCount method main.
public static void main(String[] args) throws Exception {
  final Configuration conf = new Configuration();
  final String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length != 2) {
    System.err.println("Usage: wordcount <in> <out>");
    System.exit(2);
  }
  final Job job = Job.getInstance(conf, conf.get(MRJobConfig.JOB_NAME, "word count"));
  job.setJarByClass(WordCount.class);
  job.setMapperClass(TokenizerMapper.class);
  job.setCombinerClass(IntSumReducer.class);
  job.setReducerClass(IntSumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
  FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
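Instead of constructing GenericOptionsParser by hand as WordCount does, the usual idiom is Tool plus ToolRunner, which performs the same parsing internally before handing over the remaining arguments. A minimal sketch (WordCountTool is our name; the job setup inside run() would mirror the main method above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // args arrives with the generic options already stripped, and
    // getConf() already reflects any -D overrides.
    Configuration conf = getConf();
    // ... build and submit the Job exactly as in WordCount.main above ...
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner.run applies GenericOptionsParser before invoking run().
    System.exit(ToolRunner.run(new Configuration(), new WordCountTool(), args));
  }
}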