Use of org.apache.commons.cli.GnuParser in project hadoop by apache.
From the class RumenToSLSConverter, method main:
public static void main(String[] args) throws Exception {
  Options options = new Options();
  options.addOption("input", true, "input rumen json file");
  options.addOption("outputJobs", true, "output jobs file");
  options.addOption("outputNodes", true, "output nodes file");
  CommandLineParser parser = new GnuParser();
  CommandLine cmd = parser.parse(options, args);
  if (!cmd.hasOption("input") || !cmd.hasOption("outputJobs")
      || !cmd.hasOption("outputNodes")) {
    System.err.println();
    System.err.println("ERROR: Missing input or output file");
    System.err.println();
    System.err.println("LoadGenerator creates a SLS script from a Hadoop Rumen output");
    System.err.println();
    System.err.println("Options: -input FILE -outputJobs FILE -outputNodes FILE");
    System.err.println();
    System.exit(1);
  }
  String inputFile = cmd.getOptionValue("input");
  String outputJsonFile = cmd.getOptionValue("outputJobs");
  String outputNodeFile = cmd.getOptionValue("outputNodes");
  // check existing
  if (!new File(inputFile).exists()) {
    System.err.println();
    System.err.println("ERROR: input does not exist");
    System.exit(1);
  }
  if (new File(outputJsonFile).exists()) {
    System.err.println();
    System.err.println("ERROR: output job file is existing");
    System.exit(1);
  }
  if (new File(outputNodeFile).exists()) {
    System.err.println();
    System.err.println("ERROR: output node file is existing");
    System.exit(1);
  }
  File jsonFile = new File(outputJsonFile);
  if (!jsonFile.getParentFile().exists()
      && !jsonFile.getParentFile().mkdirs()) {
    System.err.println("ERROR: Cannot create output directory in path: "
        + jsonFile.getParentFile().getAbsoluteFile());
    System.exit(1);
  }
  File nodeFile = new File(outputNodeFile);
  if (!nodeFile.getParentFile().exists()
      && !nodeFile.getParentFile().mkdirs()) {
    System.err.println("ERROR: Cannot create output directory in path: "
        + nodeFile.getParentFile().getAbsoluteFile());
    System.exit(1);
  }
  generateSLSLoadFile(inputFile, outputJsonFile);
  generateSLSNodeFile(outputNodeFile);
}
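In this pattern, addOption(name, true, description) registers an option that consumes the following token as its value, and GnuParser (the older Commons CLI parser, deprecated in favor of DefaultParser since CLI 1.3) matches it in the single-dash "-input FILE" form. Below is a minimal, self-contained sketch of the same parsing pattern outside Hadoop; the class name and file name are illustrative:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;

public class ConverterParseSketch {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    // hasArg = true: the option consumes the next token as its value
    options.addOption("input", true, "input rumen json file");
    CommandLineParser parser = new GnuParser();
    CommandLine cmd = parser.parse(options,
        new String[] { "-input", "trace.json" });
    System.out.println(cmd.hasOption("input")); // true
    System.out.println(cmd.getOptionValue("input")); // trace.json
  }
}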
Use of org.apache.commons.cli.GnuParser in project hadoop by apache.
From the class RegistryCli, method rm:
@SuppressWarnings("unchecked")
public int rm(String[] args) {
  Option recursive = OptionBuilder.withArgName("recursive")
      .withDescription("delete recursively").create("r");
  Options rmOption = new Options();
  rmOption.addOption(recursive);
  boolean recursiveOpt = false;
  CommandLineParser parser = new GnuParser();
  try {
    CommandLine line = parser.parse(rmOption, args);
    List<String> argsList = line.getArgList();
    // argsList.get(0) is the command word itself ("rm"), so exactly
    // one path argument means a size of 2
    if (argsList.size() != 2) {
      return usageError("RM requires exactly one path argument", RM_USAGE);
    }
    if (!validatePath(argsList.get(1))) {
      return -1;
    }
    try {
      if (line.hasOption("r")) {
        recursiveOpt = true;
      }
      registry.delete(argsList.get(1), recursiveOpt);
      return 0;
    } catch (Exception e) {
      syserr.println(analyzeException("rm", e, argsList));
    }
    return -1;
  } catch (ParseException exp) {
    return usageError("Invalid syntax " + exp.toString(), RM_USAGE);
  }
}
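Two details above are easy to miss: OptionBuilder.create("r") builds a valueless boolean flag, and getArgList() collects the leftover non-option tokens, whose first element is the command word itself (hence the size() != 2 check and argsList.get(1) as the path). A standalone sketch of that behavior, with illustrative argument values:

import java.util.List;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class RmParseSketch {
  @SuppressWarnings("unchecked") // getArgList() is a raw List in older CLI, as in rm() above
  public static void main(String[] args) throws ParseException {
    Option recursive = OptionBuilder.withDescription("delete recursively")
        .create("r"); // no hasArg(): the flag takes no value
    Options opts = new Options();
    opts.addOption(recursive);
    // The command word "rm" passes through the parser as a plain argument
    CommandLine line = new GnuParser().parse(opts,
        new String[] { "rm", "-r", "/users/example" });
    List<String> argsList = line.getArgList();
    System.out.println(line.hasOption("r")); // true
    System.out.println(argsList); // [rm, /users/example]
  }
}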
Use of org.apache.commons.cli.GnuParser in project hadoop by apache.
From the class RegistryCli, method resolve:
@SuppressWarnings("unchecked")
public int resolve(String[] args) {
  Options resolveOption = new Options();
  CommandLineParser parser = new GnuParser();
  try {
    CommandLine line = parser.parse(resolveOption, args);
    List<String> argsList = line.getArgList();
    if (argsList.size() != 2) {
      return usageError("resolve requires exactly one path argument", RESOLVE_USAGE);
    }
    if (!validatePath(argsList.get(1))) {
      return -1;
    }
    try {
      ServiceRecord record = registry.resolve(argsList.get(1));
      for (Endpoint endpoint : record.external) {
        sysout.println(" Endpoint(ProtocolType=" + endpoint.protocolType
            + ", Api=" + endpoint.api + ");"
            + " Addresses(AddressType=" + endpoint.addressType + ") are: ");
        for (Map<String, String> address : endpoint.addresses) {
          sysout.println("[ ");
          for (Map.Entry<String, String> entry : address.entrySet()) {
            sysout.print("\t" + entry.getKey() + ":" + entry.getValue());
          }
          sysout.println("\n]");
        }
        sysout.println();
      }
      return 0;
    } catch (Exception e) {
      syserr.println(analyzeException("resolve", e, argsList));
    }
    return -1;
  } catch (ParseException exp) {
    return usageError("Invalid syntax " + exp, RESOLVE_USAGE);
  }
}
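Parsing with an empty Options is still meaningful: GnuParser routes every token into getArgList(), which is how resolve receives its positional path. A minimal sketch with an illustrative path:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class ResolveParseSketch {
  public static void main(String[] args) throws ParseException {
    // No options registered: every token ends up in getArgList()
    CommandLine line = new GnuParser().parse(new Options(),
        new String[] { "resolve", "/users/example/service" });
    System.out.println(line.getArgList()); // [resolve, /users/example/service]
  }
}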
Use of org.apache.commons.cli.GnuParser in project hadoop by apache.
From the class SLSRunner, method main:
public static void main(String[] args) throws Exception {
  Options options = new Options();
  options.addOption("inputrumen", true, "input rumen files");
  options.addOption("inputsls", true, "input sls files");
  options.addOption("nodes", true, "input topology");
  options.addOption("output", true, "output directory");
  options.addOption("trackjobs", true, "jobs to be tracked during simulating");
  options.addOption("printsimulation", false, "print out simulation information");
  CommandLineParser parser = new GnuParser();
  CommandLine cmd = parser.parse(options, args);
  String inputRumen = cmd.getOptionValue("inputrumen");
  String inputSLS = cmd.getOptionValue("inputsls");
  String output = cmd.getOptionValue("output");
  if ((inputRumen == null && inputSLS == null) || output == null) {
    System.err.println();
    System.err.println("ERROR: Missing input or output file");
    System.err.println();
    System.err.println("Options: -inputrumen|-inputsls FILE,FILE..."
        + " -output FILE [-nodes FILE] [-trackjobs JobId,JobId...]"
        + " [-printsimulation]");
    System.err.println();
    System.exit(1);
  }
  File outputFile = new File(output);
  if (!outputFile.exists() && !outputFile.mkdirs()) {
    System.err.println("ERROR: Cannot create output directory "
        + outputFile.getAbsolutePath());
    System.exit(1);
  }
  Set<String> trackedJobSet = new HashSet<String>();
  if (cmd.hasOption("trackjobs")) {
    String trackjobs = cmd.getOptionValue("trackjobs");
    String[] jobIds = trackjobs.split(",");
    trackedJobSet.addAll(Arrays.asList(jobIds));
  }
  String nodeFile = cmd.hasOption("nodes") ? cmd.getOptionValue("nodes") : "";
  boolean isSLS = inputSLS != null;
  String[] inputFiles = isSLS ? inputSLS.split(",") : inputRumen.split(",");
  SLSRunner sls = new SLSRunner(isSLS, inputFiles, nodeFile, output,
      trackedJobSet, cmd.hasOption("printsimulation"));
  sls.start();
}
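This method mixes valued options (hasArg = true) with a bare boolean flag (-printsimulation, hasArg = false), and splits the comma-separated file list by hand, since options registered this way deliver their value as one raw string. A standalone sketch of the same idiom; the file names are illustrative:

import java.util.Arrays;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class RunnerParseSketch {
  public static void main(String[] args) throws ParseException {
    Options options = new Options();
    options.addOption("inputsls", true, "input sls files"); // takes a value
    options.addOption("printsimulation", false, "print simulation info"); // boolean flag
    CommandLine cmd = new GnuParser().parse(options,
        new String[] { "-inputsls", "a.json,b.json", "-printsimulation" });
    // Comma-separated values arrive as one raw string and are split manually
    String[] inputFiles = cmd.getOptionValue("inputsls").split(",");
    System.out.println(Arrays.toString(inputFiles)); // [a.json, b.json]
    System.out.println(cmd.hasOption("printsimulation")); // true
  }
}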
Use of org.apache.commons.cli.GnuParser in project hadoop by apache.
From the class ClusterCLI, method run:
@Override
public int run(String[] args) throws Exception {
  Options opts = new Options();
  opts.addOption("lnl", LIST_LABELS_CMD, false,
      "List cluster node-label collection");
  opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
  opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
      "This is DEPRECATED, will be removed in future releases. Directly access node label store, "
          + "with this option, all node label related operations"
          + " will NOT connect RM. Instead, they will"
          + " access/modify stored node labels directly."
          + " By default, it is false (access via RM)."
          + " AND PLEASE NOTE: if you configured "
          + YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR
          + " to a local directory"
          + " (instead of NFS or HDFS), this option will only work"
          + " when the command run on the machine where RM is running."
          + " Also, this option is UNSTABLE, could be removed in future"
          + " releases.");
  int exitCode = -1;
  CommandLine parsedCli = null;
  try {
    parsedCli = new GnuParser().parse(opts, args);
  } catch (MissingArgumentException ex) {
    sysout.println("Missing argument for options");
    printUsage(opts);
    return exitCode;
  }
  if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
    accessLocal = true;
  }
  if (parsedCli.hasOption(LIST_LABELS_CMD)) {
    printClusterNodeLabels();
  } else if (parsedCli.hasOption(HELP_CMD)) {
    printUsage(opts);
    return 0;
  } else {
    syserr.println("Invalid Command Usage : ");
    printUsage(opts);
  }
  return 0;
}
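The separate catch works because MissingArgumentException is a ParseException subclass that GnuParser throws when a valued option appears without its value. None of the three flags registered above takes a value, so the sketch below uses a hypothetical hasArg = true option to trigger it:

import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class MissingArgSketch {
  public static void main(String[] args) {
    Options opts = new Options();
    opts.addOption("nodes", true, "comma-separated node list"); // hypothetical valued option
    try {
      // "-nodes" appears without the value it requires
      new GnuParser().parse(opts, new String[] { "-nodes" });
    } catch (MissingArgumentException ex) {
      System.err.println("Missing argument for options"); // mirrors the handling above
    } catch (ParseException ex) {
      System.err.println("Other parse failure: " + ex);
    }
  }
}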