Use of org.apache.commons.cli.Options in project hadoop by apache.
The class DiskBalancerCLI, method run.
/**
* Execute the command with the given arguments.
*
* @param args command specific arguments.
* @return exit code.
* @throws Exception
*/
@Override
public int run(String[] args) throws Exception {
    Options opts = getOpts();
    CommandLine cmd = parseArgs(args, opts);
    return dispatch(cmd, opts);
}
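The getOpts(), parseArgs(), and dispatch() helpers here are DiskBalancerCLI's own. For readers unfamiliar with the underlying commons-cli flow, a minimal self-contained sketch of the same build/parse/dispatch pattern follows; the DemoCli class and its option names are hypothetical, and DefaultParser (commons-cli 1.3+) stands in for whatever parser DiskBalancerCLI uses internally.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class DemoCli {

    // Assemble the supported options once so parsing and dispatch share them.
    private static Options buildOpts() {
        Options opts = new Options();
        opts.addOption("plan", true, "create a balancing plan for the given node");
        opts.addOption("help", false, "print usage");
        return opts;
    }

    public static int run(String[] args) throws ParseException {
        Options opts = buildOpts();                  // analogous to getOpts()
        CommandLineParser parser = new DefaultParser();
        CommandLine cmd = parser.parse(opts, args);  // analogous to parseArgs()
        if (cmd.hasOption("plan")) {                 // analogous to dispatch()
            System.out.println("would plan for: " + cmd.getOptionValue("plan"));
            return 0;
        }
        System.out.println("nothing to do; try -help");
        return 1;
    }

    public static void main(String[] args) throws ParseException {
        System.exit(run(args));
    }
}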
Use of org.apache.commons.cli.Options in project DataX by alibaba.
The class Engine, method entry.
public static void entry(final String[] args) throws Throwable {
    Options options = new Options();
    options.addOption("job", true, "Job config.");
    options.addOption("jobid", true, "Job unique id.");
    options.addOption("mode", true, "Job runtime mode.");
    BasicParser parser = new BasicParser();
    CommandLine cl = parser.parse(options, args);
    String jobPath = cl.getOptionValue("job");
    // If the user did not explicitly specify a jobid, datax.py passes a default jobid of -1
    String jobIdString = cl.getOptionValue("jobid");
    RUNTIME_MODE = cl.getOptionValue("mode");
    Configuration configuration = ConfigParser.parse(jobPath);
    long jobId;
    if (!"-1".equalsIgnoreCase(jobIdString)) {
        jobId = Long.parseLong(jobIdString);
    } else {
        // only for dsc & ds & datax 3 update
        String dscJobUrlPatternString = "/instance/(\\d{1,})/config.xml";
        String dsJobUrlPatternString = "/inner/job/(\\d{1,})/config";
        String dsTaskGroupUrlPatternString = "/inner/job/(\\d{1,})/taskGroup/";
        List<String> patternStringList = Arrays.asList(dscJobUrlPatternString, dsJobUrlPatternString, dsTaskGroupUrlPatternString);
        jobId = parseJobIdFromUrl(patternStringList, jobPath);
    }
    boolean isStandAloneMode = "standalone".equalsIgnoreCase(RUNTIME_MODE);
    if (!isStandAloneMode && jobId == -1) {
        // Outside standalone mode, jobId must never be -1
        throw DataXException.asDataXException(FrameworkErrorCode.CONFIG_ERROR, "A valid jobId must be provided in the URL when not running in standalone mode.");
    }
    configuration.set(CoreConstant.DATAX_CORE_CONTAINER_JOB_ID, jobId);
    // Log vmInfo
    VMInfo vmInfo = VMInfo.getVmInfo();
    if (vmInfo != null) {
        LOG.info(vmInfo.toString());
    }
    LOG.info("\n" + Engine.filterJobConfiguration(configuration) + "\n");
    LOG.debug(configuration.toJSON());
    ConfigurationValidate.doValidate(configuration);
    Engine engine = new Engine();
    engine.start(configuration);
}
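The parseJobIdFromUrl helper is not shown in this excerpt. A plausible sketch, assuming it simply tries each regex in turn and returns the first captured group as the job id, falling back to the "-1 means unset" convention used in entry():

import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class JobIdFromUrl {
    // Try each URL pattern in order; group(1) holds the numeric job id.
    static long parseJobIdFromUrl(List<String> patternStringList, String url) {
        for (String patternString : patternStringList) {
            Matcher matcher = Pattern.compile(patternString).matcher(url);
            if (matcher.find()) {
                return Long.parseLong(matcher.group(1));
            }
        }
        return -1L; // keeps the "-1 means unset" convention from entry()
    }
}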
Use of org.apache.commons.cli.Options in project hive by apache.
The class StreamingIntegrationTester, method main.
public static void main(String[] args) {
    try {
        LogUtils.initHiveLog4j();
    } catch (LogUtils.LogInitializationException e) {
        System.err.println("Unable to initialize log4j " + StringUtils.stringifyException(e));
        System.exit(-1);
    }
    Options options = new Options();
    options.addOption(OptionBuilder.hasArg().withArgName("abort-pct")
        .withDescription("Percentage of transactions to abort, defaults to 5")
        .withLongOpt("abortpct").create('a'));
    options.addOption(OptionBuilder.hasArgs().withArgName("column-names")
        .withDescription("column names of table to write to")
        .withLongOpt("columns").withValueSeparator(',').isRequired().create('c'));
    options.addOption(OptionBuilder.hasArg().withArgName("database")
        .withDescription("Database of table to write to")
        .withLongOpt("database").isRequired().create('d'));
    options.addOption(OptionBuilder.hasArg().withArgName("frequency")
        .withDescription("How often to commit a transaction, in seconds, defaults to 1")
        .withLongOpt("frequency").create('f'));
    options.addOption(OptionBuilder.hasArg().withArgName("iterations")
        .withDescription("Number of batches to write, defaults to 10")
        .withLongOpt("num-batches").create('i'));
    options.addOption(OptionBuilder.hasArg().withArgName("metastore-uri")
        .withDescription("URI of Hive metastore")
        .withLongOpt("metastore-uri").isRequired().create('m'));
    options.addOption(OptionBuilder.hasArg().withArgName("num_transactions")
        .withDescription("Number of transactions per batch, defaults to 100")
        .withLongOpt("num-txns").create('n'));
    options.addOption(OptionBuilder.hasArgs().withArgName("partition-values")
        .withDescription("partition values, must be provided in order of partition columns, " +
            "if not provided table is assumed to not be partitioned")
        .withLongOpt("partition").withValueSeparator(',').create('p'));
    options.addOption(OptionBuilder.hasArg().withArgName("records-per-transaction")
        .withDescription("records to write in each transaction, defaults to 100")
        .withLongOpt("records-per-txn").withValueSeparator(',').create('r'));
    options.addOption(OptionBuilder.hasArgs().withArgName("column-types")
        .withDescription("column types, valid values are string, int, float, decimal, date, " +
            "datetime")
        .withLongOpt("schema").withValueSeparator(',').isRequired().create('s'));
    options.addOption(OptionBuilder.hasArg().withArgName("table")
        .withDescription("Table to write to")
        .withLongOpt("table").isRequired().create('t'));
    options.addOption(OptionBuilder.hasArg().withArgName("num-writers")
        .withDescription("Number of writers to create, defaults to 2")
        .withLongOpt("writers").create('w'));
    options.addOption(OptionBuilder.hasArg(false).withArgName("pause")
        .withDescription("Wait on keyboard input after commit & batch close. default: disabled")
        .withLongOpt("pause").create('x'));
    Parser parser = new GnuParser();
    CommandLine cmdline = null;
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println(e.getMessage());
        // usage() presumably prints help and exits; otherwise cmdline would be null below.
        usage(options);
    }
    boolean pause = cmdline.hasOption('x');
    String db = cmdline.getOptionValue('d');
    String table = cmdline.getOptionValue('t');
    String uri = cmdline.getOptionValue('m');
    int txnsPerBatch = Integer.parseInt(cmdline.getOptionValue('n', "100"));
    int writers = Integer.parseInt(cmdline.getOptionValue('w', "2"));
    int batches = Integer.parseInt(cmdline.getOptionValue('i', "10"));
    int recordsPerTxn = Integer.parseInt(cmdline.getOptionValue('r', "100"));
    int frequency = Integer.parseInt(cmdline.getOptionValue('f', "1"));
    int ap = Integer.parseInt(cmdline.getOptionValue('a', "5"));
    float abortPct = ((float) ap) / 100.0f;
    String[] partVals = cmdline.getOptionValues('p');
    String[] cols = cmdline.getOptionValues('c');
    String[] types = cmdline.getOptionValues('s');
    StreamingIntegrationTester sit = new StreamingIntegrationTester(db, table, uri, txnsPerBatch,
        writers, batches, recordsPerTxn, frequency, abortPct, partVals, cols, types, pause);
    sit.go();
}
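OptionBuilder and GnuParser are deprecated as of commons-cli 1.3 in favor of Option.builder() and DefaultParser. As a sketch, the '-a/--abortpct' option above rewritten with the newer API (hypothetical BuilderDemo class; the builder calls themselves are real commons-cli 1.3+ API):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class BuilderDemo {
    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        // Same option as OptionBuilder...create('a') above, builder style.
        options.addOption(Option.builder("a")
                .hasArg()
                .argName("abort-pct")
                .desc("Percentage of transactions to abort, defaults to 5")
                .longOpt("abortpct")
                .build());
        CommandLine cmdline = new DefaultParser().parse(options, args);
        System.out.println("abort pct: " + cmdline.getOptionValue('a', "5"));
    }
}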
Use of org.apache.commons.cli.Options in project hadoop by apache.
The class TestGenericOptionsParser, method testCreateWithOptions.
/**
* Test that options passed to the constructor are used.
*/
@SuppressWarnings("static-access")
@Test
public void testCreateWithOptions() throws Exception {
    // Create new option newOpt
    Option opt = OptionBuilder.withArgName("int").hasArg().withDescription("A new option").create("newOpt");
    Options opts = new Options();
    opts.addOption(opt);
    // Check newOpt is actually used to parse the args
    String[] args = new String[2];
    args[0] = "--newOpt";
    args[1] = "7";
    GenericOptionsParser g = new GenericOptionsParser(opts, args);
    assertEquals("New option was ignored", "7", g.getCommandLine().getOptionValues("newOpt")[0]);
}
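GenericOptionsParser is Hadoop's wrapper around commons-cli: it parses Hadoop's generic options (such as -D key=value) alongside any user-supplied Options, which is exactly what this test verifies. A rough usage sketch, with a hypothetical GenericOptsDemo class and property name:

import java.util.Arrays;
import org.apache.commons.cli.Options;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class GenericOptsDemo {
    public static void main(String[] args) throws Exception {
        Options opts = new Options();
        opts.addOption("newOpt", true, "A new option");
        Configuration conf = new Configuration();
        // Hypothetical invocation: -D my.prop=x --newOpt 7
        GenericOptionsParser g = new GenericOptionsParser(conf, opts, args);
        System.out.println("newOpt = " + g.getCommandLine().getOptionValue("newOpt"));
        System.out.println("my.prop = " + conf.get("my.prop"));
        System.out.println("leftover args: " + Arrays.toString(g.getRemainingArgs()));
    }
}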
Use of org.apache.commons.cli.Options in project opennms by OpenNMS.
The class VmwareConfigBuilder, method main.
public static void main(String[] args) throws ParseException {
    String hostname = null;
    String username = null;
    String password = null;
    String rrdRepository = null;
    final Options options = new Options();
    options.addOption("rrdRepository", true, "set rrdRepository path for generated config files, default: '/opt/opennms/share/rrd/snmp/'");
    final CommandLineParser parser = new PosixParser();
    final CommandLine cmd = parser.parse(options, args);
    @SuppressWarnings("unchecked")
    List<String> arguments = (List<String>) cmd.getArgList();
    if (arguments.size() < 3) {
        usage(options, cmd);
        System.exit(1);
    }
    hostname = arguments.remove(0);
    username = arguments.remove(0);
    password = arguments.remove(0);
    if (cmd.hasOption("rrdRepository")) {
        rrdRepository = cmd.getOptionValue("rrdRepository");
    } else {
        rrdRepository = "/opt/opennms/share/rrd/snmp/";
    }
    // Trust-all certificate and hostname checks: acceptable for this one-off
    // config builder, but insecure for anything production-facing.
    TrustManager[] trustAllCerts = new TrustManager[] { new AnyServerX509TrustManager() };
    SSLContext sc = null;
    try {
        sc = SSLContext.getInstance("SSL");
        sc.init(null, trustAllCerts, null);
    } catch (NoSuchAlgorithmException e) {
        e.printStackTrace();
    } catch (KeyManagementException e) {
        e.printStackTrace();
    }
    HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
    HostnameVerifier hv = new HostnameVerifier() {
        @Override
        public boolean verify(String urlHostName, SSLSession session) {
            return true;
        }
    };
    HttpsURLConnection.setDefaultHostnameVerifier(hv);
    VmwareConfigBuilder vmwareConfigBuilder;
    vmwareConfigBuilder = new VmwareConfigBuilder(hostname, username, password);
    try {
        vmwareConfigBuilder.generateData(rrdRepository);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
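PosixParser, like BasicParser and GnuParser, is deprecated as of commons-cli 1.3. A sketch of the same positional-argument handling with DefaultParser (hypothetical PositionalArgsDemo class; getArgList() and getOptionValue(String, String) are real commons-cli API):

import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

public class PositionalArgsDemo {
    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("rrdRepository", true, "rrdRepository path for generated config files");
        CommandLine cmd = new DefaultParser().parse(options, args);
        List<String> arguments = cmd.getArgList(); // non-option args, in order
        if (arguments.size() < 3) {
            System.err.println("usage: <hostname> <username> <password>");
            System.exit(1);
        }
        String hostname = arguments.get(0);
        String username = arguments.get(1);
        String password = arguments.get(2);
        // getOptionValue with a default replaces the hasOption/else branch above.
        String rrdRepository = cmd.getOptionValue("rrdRepository", "/opt/opennms/share/rrd/snmp/");
        System.out.println(hostname + " as " + username + " -> " + rrdRepository);
    }
}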