Use of org.apache.commons.cli.Options in project hadoop by apache.
The class TimelineSchemaCreator, method parseArgs.
/**
 * Parse command-line arguments.
 *
 * @param args command line arguments passed to the program.
 * @return parsed command line.
 * @throws ParseException if the arguments cannot be parsed.
 */
private static CommandLine parseArgs(String[] args) throws ParseException {
  Options options = new Options();
  // Options that take an argument
  Option o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
      "entity table name");
  o.setArgName("entityTableName");
  o.setRequired(false);
  options.addOption(o);

  o = new Option(TTL_OPTION_SHORT, "metricsTTL", true,
      "TTL for metrics column family");
  o.setArgName("metricsTTL");
  o.setRequired(false);
  options.addOption(o);

  o = new Option(APP_TO_FLOW_TABLE_NAME_SHORT, "appToflowTableName", true,
      "app to flow table name");
  o.setArgName("appToflowTableName");
  o.setRequired(false);
  options.addOption(o);

  o = new Option(APP_TABLE_NAME_SHORT, "applicationTableName", true,
      "application table name");
  o.setArgName("applicationTableName");
  o.setRequired(false);
  options.addOption(o);

  // Options without an argument; no arg name needs to be set
  o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable", false,
      "skip existing HBase tables and continue to create new tables");
  o.setRequired(false);
  options.addOption(o);

  CommandLineParser parser = new PosixParser();
  CommandLine commandLine = null;
  try {
    commandLine = parser.parse(options, args);
  } catch (Exception e) {
    LOG.error("ERROR: " + e.getMessage() + "\n");
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp(NAME + " ", options, true);
    System.exit(-1);
  }
  return commandLine;
}
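A minimal sketch of registering and reading one of these options outside TimelineSchemaCreator; the short name "e" and the fallback table name are assumptions for illustration, not taken from the Hadoop source, and DefaultParser (Commons CLI 1.3+) stands in for the deprecated PosixParser:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class SchemaArgsSketch {
  public static void main(String[] args) throws ParseException {
    Options options = new Options();
    // mirrors the entity-table option registered above; "e" is a hypothetical short name
    Option o = new Option("e", "entityTableName", true, "entity table name");
    o.setArgName("entityTableName");
    o.setRequired(false);
    options.addOption(o);
    CommandLine cmd = new DefaultParser().parse(options, args);
    // getOptionValue(opt, defaultValue) falls back when the flag is absent;
    // the default shown here is made up for the example
    String table = cmd.getOptionValue("e", "default.entity.table");
    System.out.println("entity table: " + table);
  }
}

Invoked as java SchemaArgsSketch -e mytable (or --entityTableName mytable) this prints the supplied name; with no flags it prints the fallback.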
Use of org.apache.commons.cli.Options in project hadoop by apache.
The class ArgumentParser, method getOptions.
/**
* @return the option set to be used in command line parsing
*/
private Options getOptions() {
  Options cliopt = new Options();
  cliopt.addOption(ConfigOption.MAPS);
  cliopt.addOption(ConfigOption.REDUCES);
  cliopt.addOption(ConfigOption.PACKET_SIZE);
  cliopt.addOption(ConfigOption.OPS);
  cliopt.addOption(ConfigOption.DURATION);
  cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
  cliopt.addOption(ConfigOption.SLEEP_TIME);
  cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
  cliopt.addOption(ConfigOption.FILES);
  cliopt.addOption(ConfigOption.DIR_SIZE);
  cliopt.addOption(ConfigOption.BASE_DIR);
  cliopt.addOption(ConfigOption.RESULT_FILE);
  cliopt.addOption(ConfigOption.CLEANUP);
  {
    String[] distStrs = new String[Distribution.values().length];
    Distribution[] distValues = Distribution.values();
    for (int i = 0; i < distValues.length; ++i) {
      distStrs[i] = distValues[i].lowerName();
    }
    String opdesc = String.format(Constants.OP_DESCR,
        StringUtils.arrayToString(distStrs));
    for (OperationType type : OperationType.values()) {
      String opname = type.lowerName();
      cliopt.addOption(new Option(opname, true, opdesc));
    }
  }
  cliopt.addOption(ConfigOption.REPLICATION_AM);
  cliopt.addOption(ConfigOption.BLOCK_SIZE);
  cliopt.addOption(ConfigOption.READ_SIZE);
  cliopt.addOption(ConfigOption.WRITE_SIZE);
  cliopt.addOption(ConfigOption.APPEND_SIZE);
  cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
  cliopt.addOption(ConfigOption.RANDOM_SEED);
  cliopt.addOption(ConfigOption.QUEUE_NAME);
  cliopt.addOption(ConfigOption.HELP);
  return cliopt;
}
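The inner block is the interesting part: one Option per enum constant, all sharing a description generated from the Distribution values. A self-contained sketch of that pattern, with an illustrative enum standing in for SLive's OperationType (the real Constants.OP_DESCR format string is not reproduced here):

import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class EnumOptionsSketch {

  // illustrative stand-in for the real OperationType enum
  enum OperationType {
    CREATE, DELETE, RENAME, READ;
    String lowerName() {
      return name().toLowerCase();
    }
  }

  public static void main(String[] args) {
    Options cliopt = new Options();
    // register one argument-taking option per operation type
    for (OperationType type : OperationType.values()) {
      cliopt.addOption(new Option(type.lowerName(), true,
          "distribution to use for " + type.lowerName()));
    }
    System.out.println(cliopt.getOptions().size() + " options registered");
  }
}

Driving option registration from the enum keeps the CLI in sync when new operation types are added.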
Use of org.apache.commons.cli.Options in project hadoop by apache.
The class HadoopArchiveLogs, method handleOpts.
private void handleOpts(String[] args) throws ParseException {
  Options opts = new Options();
  Option helpOpt = new Option(HELP_OPTION, false, "Prints this message");
  Option maxEligibleOpt = new Option(MAX_ELIGIBLE_APPS_OPTION, true,
      "The maximum number of eligible apps to process (default: "
          + DEFAULT_MAX_ELIGIBLE + " (all))");
  maxEligibleOpt.setArgName("n");
  Option minNumLogFilesOpt = new Option(MIN_NUM_LOG_FILES_OPTION, true,
      "The minimum number of log files required to be eligible (default: "
          + DEFAULT_MIN_NUM_LOG_FILES + ")");
  minNumLogFilesOpt.setArgName("n");
  Option maxTotalLogsSizeOpt = new Option(MAX_TOTAL_LOGS_SIZE_OPTION, true,
      "The maximum total logs size (in megabytes) required to be eligible"
          + " (default: " + DEFAULT_MAX_TOTAL_LOGS_SIZE + ")");
  maxTotalLogsSizeOpt.setArgName("megabytes");
  Option memoryOpt = new Option(MEMORY_OPTION, true,
      "The amount of memory (in megabytes) for each container (default: "
          + DEFAULT_MEMORY + ")");
  memoryOpt.setArgName("megabytes");
  Option verboseOpt = new Option(VERBOSE_OPTION, false, "Print more details.");
  Option forceOpt = new Option(FORCE_OPTION, false,
      "Force recreating the working directory if an existing one is found. "
          + "This should only be used if you know that another instance is "
          + "not currently running");
  Option noProxyOpt = new Option(NO_PROXY_OPTION, false,
      "When specified, all processing will be done as the user running this"
          + " command (or the Yarn user if DefaultContainerExecutor is in "
          + "use). When not specified, all processing will be done as the "
          + "user who owns that application; if the user running this command"
          + " is not allowed to impersonate that user, it will fail");
  opts.addOption(helpOpt);
  opts.addOption(maxEligibleOpt);
  opts.addOption(minNumLogFilesOpt);
  opts.addOption(maxTotalLogsSizeOpt);
  opts.addOption(memoryOpt);
  opts.addOption(verboseOpt);
  opts.addOption(forceOpt);
  opts.addOption(noProxyOpt);
  try {
    CommandLineParser parser = new GnuParser();
    CommandLine commandLine = parser.parse(opts, args);
    if (commandLine.hasOption(HELP_OPTION)) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp("mapred archive-logs", opts);
      System.exit(0);
    }
    if (commandLine.hasOption(MAX_ELIGIBLE_APPS_OPTION)) {
      maxEligible = Integer.parseInt(
          commandLine.getOptionValue(MAX_ELIGIBLE_APPS_OPTION));
      if (maxEligible == 0) {
        LOG.info("Setting " + MAX_ELIGIBLE_APPS_OPTION + " to 0 accomplishes "
            + "nothing. Please either set it to a negative value "
            + "(default, all) or a more reasonable value.");
        System.exit(0);
      }
    }
    if (commandLine.hasOption(MIN_NUM_LOG_FILES_OPTION)) {
      minNumLogFiles = Integer.parseInt(
          commandLine.getOptionValue(MIN_NUM_LOG_FILES_OPTION));
    }
    if (commandLine.hasOption(MAX_TOTAL_LOGS_SIZE_OPTION)) {
      maxTotalLogsSize = Long.parseLong(
          commandLine.getOptionValue(MAX_TOTAL_LOGS_SIZE_OPTION));
      // convert megabytes to bytes
      maxTotalLogsSize *= 1024L * 1024L;
    }
    if (commandLine.hasOption(MEMORY_OPTION)) {
      memory = Long.parseLong(commandLine.getOptionValue(MEMORY_OPTION));
    }
    if (commandLine.hasOption(VERBOSE_OPTION)) {
      verbose = true;
    }
    if (commandLine.hasOption(FORCE_OPTION)) {
      force = true;
    }
    if (commandLine.hasOption(NO_PROXY_OPTION)) {
      proxy = false;
    }
  } catch (ParseException pe) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("mapred archive-logs", opts);
    throw pe;
  }
}
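The try/catch here implements a common Commons CLI idiom: on a parse failure, print generated help, then rethrow. A condensed, self-contained sketch of the same flow; the option names and syntax string are illustrative, and DefaultParser replaces the deprecated GnuParser:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class HelpOnErrorSketch {
  public static void main(String[] args) throws ParseException {
    Options opts = new Options();
    opts.addOption(new Option("help", false, "Prints this message"));
    Option memory = new Option("memory", true, "container memory in megabytes");
    memory.setArgName("megabytes");
    opts.addOption(memory);
    try {
      CommandLine cmd = new DefaultParser().parse(opts, args);
      if (cmd.hasOption("help")) {
        new HelpFormatter().printHelp("example", opts);
        return;
      }
      // fall back to a made-up default when -memory is absent
      long memoryMb = cmd.hasOption("memory")
          ? Long.parseLong(cmd.getOptionValue("memory")) : 1024L;
      System.out.println("memory = " + memoryMb + " MB");
    } catch (ParseException pe) {
      // show usage derived from the same Options object, then propagate
      new HelpFormatter().printHelp("example", opts);
      throw pe;
    }
  }
}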
Use of org.apache.commons.cli.Options in project hadoop by apache.
The class HadoopArchiveLogsRunner, method handleOpts.
private void handleOpts(String[] args) throws ParseException {
  Options opts = new Options();
  Option appIdOpt = new Option(APP_ID_OPTION, true, "Application ID");
  appIdOpt.setRequired(true);
  Option userOpt = new Option(USER_OPTION, true, "User");
  userOpt.setRequired(true);
  Option workingDirOpt = new Option(WORKING_DIR_OPTION, true,
      "Working Directory");
  workingDirOpt.setRequired(true);
  Option remoteLogDirOpt = new Option(REMOTE_ROOT_LOG_DIR_OPTION, true,
      "Remote Root Log Directory");
  remoteLogDirOpt.setRequired(true);
  Option suffixOpt = new Option(SUFFIX_OPTION, true, "Suffix");
  suffixOpt.setRequired(true);
  Option useProxyOpt = new Option(NO_PROXY_OPTION, false, "Use Proxy");
  opts.addOption(appIdOpt);
  opts.addOption(userOpt);
  opts.addOption(workingDirOpt);
  opts.addOption(remoteLogDirOpt);
  opts.addOption(suffixOpt);
  opts.addOption(useProxyOpt);
  CommandLineParser parser = new GnuParser();
  CommandLine commandLine = parser.parse(opts, args);
  appId = commandLine.getOptionValue(APP_ID_OPTION);
  user = commandLine.getOptionValue(USER_OPTION);
  workingDir = commandLine.getOptionValue(WORKING_DIR_OPTION);
  remoteLogDir = commandLine.getOptionValue(REMOTE_ROOT_LOG_DIR_OPTION);
  suffix = commandLine.getOptionValue(SUFFIX_OPTION);
  proxy = true;
  if (commandLine.hasOption(NO_PROXY_OPTION)) {
    proxy = false;
  }
}
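Because the first five options are marked setRequired(true), parse() itself enforces their presence: omitting any of them makes the parser throw a MissingOptionException, a subclass of ParseException, so no hand-rolled hasOption checks are needed. A minimal sketch of that behavior with a single, hypothetical -appId option:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class RequiredOptionSketch {
  public static void main(String[] args) {
    Options opts = new Options();
    Option appIdOpt = new Option("appId", true, "Application ID");
    appIdOpt.setRequired(true);
    opts.addOption(appIdOpt);
    try {
      CommandLine cmd = new DefaultParser().parse(opts, args);
      System.out.println("appId = " + cmd.getOptionValue("appId"));
    } catch (ParseException pe) {
      // surfaces as MissingOptionException when -appId is absent
      System.err.println("parse failed: " + pe.getMessage());
    }
  }
}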
Use of org.apache.commons.cli.Options in project hadoop by apache.
The class RumenToSLSConverter, method main.
public static void main(String[] args) throws Exception {
  Options options = new Options();
  options.addOption("input", true, "input rumen json file");
  options.addOption("outputJobs", true, "output jobs file");
  options.addOption("outputNodes", true, "output nodes file");
  CommandLineParser parser = new GnuParser();
  CommandLine cmd = parser.parse(options, args);
  if (!cmd.hasOption("input") || !cmd.hasOption("outputJobs")
      || !cmd.hasOption("outputNodes")) {
    System.err.println();
    System.err.println("ERROR: Missing input or output file");
    System.err.println();
    System.err.println("LoadGenerator creates an SLS script "
        + "from a Hadoop Rumen output");
    System.err.println();
    System.err.println("Options: -input FILE -outputJobs FILE "
        + "-outputNodes FILE");
    System.err.println();
    System.exit(1);
  }
  String inputFile = cmd.getOptionValue("input");
  String outputJsonFile = cmd.getOptionValue("outputJobs");
  String outputNodeFile = cmd.getOptionValue("outputNodes");
  // the input must exist and the outputs must not
  if (!new File(inputFile).exists()) {
    System.err.println();
    System.err.println("ERROR: input does not exist");
    System.exit(1);
  }
  if (new File(outputJsonFile).exists()) {
    System.err.println();
    System.err.println("ERROR: output job file already exists");
    System.exit(1);
  }
  if (new File(outputNodeFile).exists()) {
    System.err.println();
    System.err.println("ERROR: output node file already exists");
    System.exit(1);
  }
  File jsonFile = new File(outputJsonFile);
  if (!jsonFile.getParentFile().exists() && !jsonFile.getParentFile().mkdirs()) {
    System.err.println("ERROR: Cannot create output directory in path: "
        + jsonFile.getParentFile().getAbsoluteFile());
    System.exit(1);
  }
  File nodeFile = new File(outputNodeFile);
  if (!nodeFile.getParentFile().exists() && !nodeFile.getParentFile().mkdirs()) {
    System.err.println("ERROR: Cannot create output directory in path: "
        + nodeFile.getParentFile().getAbsoluteFile());
    System.exit(1);
  }
  generateSLSLoadFile(inputFile, outputJsonFile);
  generateSLSNodeFile(outputNodeFile);
}
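RumenToSLSConverter prints its usage text by hand; HelpFormatter can instead generate both the usage line and the option descriptions from the same Options object. A sketch of that alternative (the command-line syntax string is illustrative, not how the converter actually prints help):

import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;

public class PrintUsageSketch {
  public static void main(String[] args) {
    Options options = new Options();
    options.addOption("input", true, "input rumen json file");
    options.addOption("outputJobs", true, "output jobs file");
    options.addOption("outputNodes", true, "output nodes file");
    // the final 'true' asks HelpFormatter to auto-generate the usage line
    new HelpFormatter().printHelp("RumenToSLSConverter", options, true);
  }
}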