Use of org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser in project hbase by Apache.
The class HMasterCommandLine, method run.
@Override
public int run(String[] args) throws Exception {
  boolean shutDownCluster = false;
  Options opt = new Options();
  opt.addOption("localRegionServers", true,
    "RegionServers to start in master process when running standalone");
  opt.addOption("masters", true, "Masters to start in this process");
  opt.addOption("minRegionServers", true, "Minimum RegionServers needed to host user tables");
  opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
  opt.addOption("shutDownCluster", false, "`hbase master stop --shutDownCluster` shuts down cluster");
  CommandLine cmd;
  try {
    cmd = new GnuParser().parse(opt, args);
  } catch (ParseException e) {
    LOG.error("Could not parse: ", e);
    usage(null);
    return 1;
  }
  if (cmd.hasOption("minRegionServers")) {
    String val = cmd.getOptionValue("minRegionServers");
    getConf().setInt("hbase.regions.server.count.min", Integer.parseInt(val));
    LOG.debug("minRegionServers set to " + val);
  }
  // minRegionServers used to be minServers. Support it too.
  if (cmd.hasOption("minServers")) {
    String val = cmd.getOptionValue("minServers");
    getConf().setInt("hbase.regions.server.count.min", Integer.parseInt(val));
    LOG.debug("minServers set to " + val);
  }
  // check if we are the backup master - override the conf if so
  if (cmd.hasOption("backup")) {
    getConf().setBoolean(HConstants.MASTER_TYPE_BACKUP, true);
  }
  // How many regionservers to startup in this process (we run regionservers in same process as
  // master when we are in local/standalone mode. Useful testing)
if (cmd.hasOption("localRegionServers")) {
String val = cmd.getOptionValue("localRegionServers");
getConf().setInt("hbase.regionservers", Integer.parseInt(val));
LOG.debug("localRegionServers set to " + val);
}
// How many masters to startup inside this process; useful testing
if (cmd.hasOption("masters")) {
String val = cmd.getOptionValue("masters");
getConf().setInt("hbase.masters", Integer.parseInt(val));
LOG.debug("masters set to " + val);
}
// Checking whether to shut down cluster or not
if (cmd.hasOption("shutDownCluster")) {
shutDownCluster = true;
}
@SuppressWarnings("unchecked") List<String> remainingArgs = cmd.getArgList();
if (remainingArgs.size() != 1) {
usage(null);
return 1;
}
String command = remainingArgs.get(0);
if ("start".equals(command)) {
return startMaster();
} else if ("stop".equals(command)) {
if (shutDownCluster) {
return stopMaster();
}
System.err.println("To shutdown the master run " + "hbase-daemon.sh stop master or send a kill signal to " + "the HMaster pid, " + "and to stop HBase Cluster run \"stop-hbase.sh\" or \"hbase master stop --shutDownCluster\"");
return 1;
} else if ("clear".equals(command)) {
return (ZNodeClearer.clear(getConf()) ? 0 : 1);
} else {
usage("Invalid command: " + command);
return 1;
}
}
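For reference, this method shows the basic shape shared by every GnuParser usage on this page: build an Options set, parse the argument array, then interrogate the resulting CommandLine. The following is a minimal, self-contained sketch of that flow; the class name and the two options are illustrative only and are not taken from HBase (outside HBase you would import the plain org.apache.commons.cli package rather than the hbase-thirdparty relocation).

// Illustrative sketch only: the Options -> GnuParser -> CommandLine flow.
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;

public class ParseSketch {
  public static void main(String[] args) {
    Options opt = new Options();
    // A flag option (no argument) and a value option (takes an argument).
    opt.addOption("backup", false, "boolean flag, no argument");
    opt.addOption("masters", true, "option that takes a value");
    try {
      CommandLine cmd = new GnuParser().parse(opt, args);
      if (cmd.hasOption("backup")) {
        System.out.println("backup flag set");
      }
      if (cmd.hasOption("masters")) {
        System.out.println("masters = " + cmd.getOptionValue("masters"));
      }
      // Positional arguments left over after option parsing, e.g. "start" or "stop".
      System.out.println("remaining args: " + cmd.getArgList());
    } catch (ParseException e) {
      System.err.println("Could not parse: " + e.getMessage());
    }
  }
}

Option values always come back as strings; HMasterCommandLine converts them with Integer.parseInt and writes them into the Hadoop Configuration, as shown above.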
Use of org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser in project hbase by Apache.
The class TestJoinedScanners, method main.
/**
 * Command line interface:
 * @param args
 * @throws IOException if there is a bug while reading from disk
 */
public static void main(final String[] args) throws Exception {
  Option encodingOption =
    new Option("e", "blockEncoding", true, "Data block encoding; Default: FAST_DIFF");
  encodingOption.setRequired(false);
  options.addOption(encodingOption);
  Option ratioOption =
    new Option("r", "selectionRatio", true, "Ratio of selected rows using essential column family");
  ratioOption.setRequired(false);
  options.addOption(ratioOption);
  Option widthOption =
    new Option("w", "valueWidth", true, "Width of value for non-essential column family");
  widthOption.setRequired(false);
  options.addOption(widthOption);
  CommandLineParser parser = new GnuParser();
  CommandLine cmd = parser.parse(options, args);
  if (args.length < 1) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("TestJoinedScanners", options, true);
  }
  if (cmd.hasOption("e")) {
    blockEncoding = DataBlockEncoding.valueOf(cmd.getOptionValue("e"));
  }
  if (cmd.hasOption("r")) {
    selectionRatio = Integer.parseInt(cmd.getOptionValue("r"));
  }
  if (cmd.hasOption("w")) {
    valueWidth = Integer.parseInt(cmd.getOptionValue("w"));
  }
  // run the test
  TestJoinedScanners test = new TestJoinedScanners();
  test.testJoinedScanners();
}
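Each option in this test is registered with both a short and a long name (for example -e / --blockEncoding), and commons-cli resolves either form to the same parsed option, so the cmd.hasOption("e") checks above also cover arguments given in the long form. Below is a small illustrative check of that behavior; the harness class is hypothetical, and the fields options, blockEncoding, selectionRatio and valueWidth referenced in the excerpt are declared elsewhere in the test class and not shown here.

// Hypothetical harness, not HBase code: short and long names hit the same option.
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;

public class OptionNamesDemo {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(new Option("e", "blockEncoding", true, "Data block encoding"));
    CommandLine cmd =
      new GnuParser().parse(options, new String[] { "--blockEncoding", "FAST_DIFF" });
    // Both lookups print FAST_DIFF.
    System.out.println(cmd.getOptionValue("e"));
    System.out.println(cmd.getOptionValue("blockEncoding"));
  }
}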
Use of org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser in project hbase by Apache.
The class RegionSplitter, method main.
/**
 * The main function for the RegionSplitter application. Common uses:
 * <p>
 * <ul>
 * <li>create a table named 'myTable' with 60 pre-split regions containing 2
 * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII:
 * <ul>
 * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs
 * myTable HexStringSplit
 * </ul>
 * <li>create a table named 'myTable' with 50 pre-split regions,
 * assuming the keys are decimal-encoded ASCII:
 * <ul>
 * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50
 * myTable DecimalStringSplit
 * </ul>
 * <li>perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2
 * outstanding splits at a time, assuming keys are uniformly distributed
 * bytes:
 * <ul>
 * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable
 * UniformSplit
 * </ul>
 * </ul>
 *
 * There are three SplitAlgorithms built into RegionSplitter: HexStringSplit,
 * DecimalStringSplit, and UniformSplit. These are different strategies for
 * choosing region boundaries. See their source code for details.
 *
 * @param args
 *          Usage: RegionSplitter <TABLE> <SPLITALGORITHM>
 *          <-c <# regions> -f <family:family:...> | -r
 *          [-o <# outstanding splits>]>
 *          [-D <conf.param=value>]
 * @throws IOException
 *           HBase IO problem
 * @throws InterruptedException
 *           user requested exit
 * @throws ParseException
 *           problem parsing user input
 */
@SuppressWarnings("static-access")
public static void main(String[] args) throws IOException, InterruptedException, ParseException {
  Configuration conf = HBaseConfiguration.create();
  // parse user input
  Options opt = new Options();
  opt.addOption(OptionBuilder.withArgName("property=value").hasArg()
    .withDescription("Override HBase Configuration Settings").create("D"));
  opt.addOption(OptionBuilder.withArgName("region count").hasArg()
    .withDescription("Create a new table with a pre-split number of regions").create("c"));
  opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg()
    .withDescription("Column Families to create with new table. Required with -c").create("f"));
  opt.addOption("h", false, "Print this usage help");
  opt.addOption("r", false, "Perform a rolling split of an existing region");
  opt.addOption(OptionBuilder.withArgName("count").hasArg()
    .withDescription("Max outstanding splits that have unfinished major compactions").create("o"));
  opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm");
  opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm");
  opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. " +
    "STRONGLY DISCOURAGED for production systems. ");
  CommandLine cmd = new GnuParser().parse(opt, args);
  if (cmd.hasOption("D")) {
    for (String confOpt : cmd.getOptionValues("D")) {
      String[] kv = confOpt.split("=", 2);
      if (kv.length == 2) {
        conf.set(kv[0], kv[1]);
        LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
      } else {
        throw new ParseException("-D option format invalid: " + confOpt);
      }
    }
  }
  if (cmd.hasOption("risky")) {
    conf.setBoolean("split.verify", false);
  }
  boolean createTable = cmd.hasOption("c") && cmd.hasOption("f");
  boolean rollingSplit = cmd.hasOption("r");
  boolean oneOperOnly = createTable ^ rollingSplit;
  if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) {
    new HelpFormatter().printHelp("bin/hbase regionsplitter <TABLE> <SPLITALGORITHM>\n" +
      "SPLITALGORITHM is the java class name of a class implementing " +
      "SplitAlgorithm, or one of the special strings HexStringSplit or " +
      "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " +
      "HexStringSplit treats keys as hexadecimal ASCII, and " +
      "DecimalStringSplit treats keys as decimal ASCII, and " +
      "UniformSplit treats keys as arbitrary bytes.", opt);
    return;
  }
  TableName tableName = TableName.valueOf(cmd.getArgs()[0]);
  String splitClass = cmd.getArgs()[1];
  SplitAlgorithm splitAlgo = newSplitAlgoInstance(conf, splitClass);
  if (cmd.hasOption("firstrow")) {
    splitAlgo.setFirstRow(cmd.getOptionValue("firstrow"));
  }
  if (cmd.hasOption("lastrow")) {
    splitAlgo.setLastRow(cmd.getOptionValue("lastrow"));
  }
  if (createTable) {
    conf.set("split.count", cmd.getOptionValue("c"));
    createPresplitTable(tableName, splitAlgo, cmd.getOptionValue("f").split(":"), conf);
  }
  if (rollingSplit) {
    if (cmd.hasOption("o")) {
      conf.set("split.outstanding", cmd.getOptionValue("o"));
    }
    rollingSplit(tableName, splitAlgo, conf);
  }
}
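The OptionBuilder chains in this method rely on static method calls, which is why the method carries the @SuppressWarnings("static-access") annotation. In commons-cli 1.3 and later, both OptionBuilder and GnuParser are deprecated in favor of Option.builder() and DefaultParser. As a hypothetical rewrite (not what HBase ships), the -D option above could be declared against the newer API as follows, assuming the plain org.apache.commons.cli artifact rather than the hbase-thirdparty relocation:

// Hypothetical sketch of the non-deprecated commons-cli 1.3+ equivalent.
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class BuilderSketch {
  public static void main(String[] args) throws Exception {
    Options opt = new Options();
    opt.addOption(Option.builder("D")
      .argName("property=value")
      .hasArg()
      .desc("Override HBase Configuration Settings")
      .build());
    CommandLine cmd = new DefaultParser().parse(opt, args);
    // -D may be repeated; getOptionValues returns every occurrence.
    if (cmd.hasOption("D")) {
      for (String kv : cmd.getOptionValues("D")) {
        System.out.println("override: " + kv);
      }
    }
  }
}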
Use of org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser in project hbase by Apache.
The class RegionPlacementMaintainer, method main.
public static void main(String[] args) throws IOException {
  Options opt = new Options();
  opt.addOption("w", "write", false, "write the assignments to hbase:meta only");
  opt.addOption("u", "update", false, "update the assignments to hbase:meta and RegionServers together");
  opt.addOption("n", "dry-run", false, "do not write assignments to META");
  opt.addOption("v", "verify", false, "verify current assignments against META");
  opt.addOption("p", "print", false, "print the current assignment plan in META");
  opt.addOption("h", "help", false, "print usage");
  opt.addOption("d", "verification-details", false, "print the details of verification report");
  opt.addOption("zk", true, "to set the zookeeper quorum");
  opt.addOption("fs", true, "to set HDFS");
  opt.addOption("hbase_root", true, "to set hbase_root directory");
  opt.addOption("overwrite", false, "overwrite the favored nodes for a single region," +
    "for example: -update -r regionName -f server1:port,server2:port,server3:port");
  opt.addOption("r", true, "The region name that needs to be updated");
  opt.addOption("f", true, "The new favored nodes");
  opt.addOption("tables", true, "The list of table names splitted by ',' ;" +
    "For example: -tables: t1,t2,...,tn");
  opt.addOption("l", "locality", true, "enforce the maximum locality");
  opt.addOption("m", "min-move", true, "enforce minimum assignment move");
  opt.addOption("diff", false, "calculate difference between assignment plans");
  opt.addOption("munkres", false, "use munkres to place secondaries and tertiaries");
  opt.addOption("ld", "locality-dispersion", false, "print locality and dispersion " +
    "information for current plan");
  try {
    CommandLine cmd = new GnuParser().parse(opt, args);
    Configuration conf = HBaseConfiguration.create();
    boolean enforceMinAssignmentMove = true;
    boolean enforceLocality = true;
    boolean verificationDetails = false;
    // Read all the options
    if ((cmd.hasOption("l") && cmd.getOptionValue("l").equalsIgnoreCase("false")) ||
      (cmd.hasOption("locality") && cmd.getOptionValue("locality").equalsIgnoreCase("false"))) {
      enforceLocality = false;
    }
    if ((cmd.hasOption("m") && cmd.getOptionValue("m").equalsIgnoreCase("false")) ||
      (cmd.hasOption("min-move") && cmd.getOptionValue("min-move").equalsIgnoreCase("false"))) {
      enforceMinAssignmentMove = false;
    }
    if (cmd.hasOption("zk")) {
      conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue("zk"));
      LOG.info("Setting the zk quorum: " + conf.get(HConstants.ZOOKEEPER_QUORUM));
    }
    if (cmd.hasOption("fs")) {
      conf.set(FileSystem.FS_DEFAULT_NAME_KEY, cmd.getOptionValue("fs"));
      LOG.info("Setting the HDFS: " + conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
    }
    if (cmd.hasOption("hbase_root")) {
      conf.set(HConstants.HBASE_DIR, cmd.getOptionValue("hbase_root"));
      LOG.info("Setting the hbase root directory: " + conf.get(HConstants.HBASE_DIR));
    }
    // Create the region placement obj
    try (RegionPlacementMaintainer rp =
      new RegionPlacementMaintainer(conf, enforceLocality, enforceMinAssignmentMove)) {
      if (cmd.hasOption("d") || cmd.hasOption("verification-details")) {
        verificationDetails = true;
      }
      if (cmd.hasOption("tables")) {
        String tableNameListStr = cmd.getOptionValue("tables");
        String[] tableNames = StringUtils.split(tableNameListStr, ",");
        rp.setTargetTableName(tableNames);
      }
      if (cmd.hasOption("munkres")) {
        USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true;
      }
      // Read all the modes
      if (cmd.hasOption("v") || cmd.hasOption("verify")) {
        // Verify the region placement.
        rp.verifyRegionPlacement(verificationDetails);
      } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) {
        // Generate the assignment plan only without updating the hbase:meta and RS
        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
        printAssignmentPlan(plan);
      } else if (cmd.hasOption("w") || cmd.hasOption("write")) {
        // Generate the new assignment plan
        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
        // Print the new assignment plan
        printAssignmentPlan(plan);
        // Write the new assignment plan to META
        rp.updateAssignmentPlanToMeta(plan);
      } else if (cmd.hasOption("u") || cmd.hasOption("update")) {
        // Generate the new assignment plan
        FavoredNodesPlan plan = rp.getNewAssignmentPlan();
        // Print the new assignment plan
        printAssignmentPlan(plan);
        // Update the assignment to hbase:meta and Region Servers
        rp.updateAssignmentPlan(plan);
      } else if (cmd.hasOption("diff")) {
        FavoredNodesPlan newPlan = rp.getNewAssignmentPlan();
        Map<String, Map<String, Float>> locality =
          FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
        Map<TableName, Integer> movesPerTable = rp.getRegionsMovement(newPlan);
        rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan);
        System.out.println("Do you want to update the assignment plan? [y/n]");
        Scanner s = new Scanner(System.in);
        String input = s.nextLine().trim();
        if (input.equals("y")) {
          System.out.println("Updating assignment plan...");
          rp.updateAssignmentPlan(newPlan);
        }
        s.close();
      } else if (cmd.hasOption("ld")) {
        Map<String, Map<String, Float>> locality =
          FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
        rp.printLocalityAndDispersionForCurrentPlan(locality);
      } else if (cmd.hasOption("p") || cmd.hasOption("print")) {
        FavoredNodesPlan plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
        printAssignmentPlan(plan);
      } else if (cmd.hasOption("overwrite")) {
        if (!cmd.hasOption("f") || !cmd.hasOption("r")) {
          throw new IllegalArgumentException("Please specify: " +
            " -update -r regionName -f server1:port,server2:port,server3:port");
        }
        String regionName = cmd.getOptionValue("r");
        String favoredNodesStr = cmd.getOptionValue("f");
        LOG.info("Going to update the region " + regionName + " with the new favored nodes " +
          favoredNodesStr);
        List<ServerName> favoredNodes = null;
        RegionInfo regionInfo =
          rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap().get(regionName);
        if (regionInfo == null) {
          LOG.error("Cannot find the region " + regionName + " from the META");
        } else {
          try {
            favoredNodes = getFavoredNodeList(favoredNodesStr);
          } catch (IllegalArgumentException e) {
            LOG.error("Cannot parse the invalid favored nodes because " + e);
          }
          FavoredNodesPlan newPlan = new FavoredNodesPlan();
          newPlan.updateFavoredNodesMap(regionInfo, favoredNodes);
          rp.updateAssignmentPlan(newPlan);
        }
      } else {
        printHelp(opt);
      }
    }
  } catch (ParseException e) {
    printHelp(opt);
  }
}
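On a parse failure this method falls back to printHelp(opt), a private helper that is not part of the excerpt. A typical implementation would use the same HelpFormatter seen in the other entry points on this page; the sketch below is an assumption about its shape, and the usage string is illustrative rather than the actual HBase text.

// Assumed shape of a printHelp(opt)-style fallback; the usage text is illustrative.
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;

public class HelpSketch {
  private static void printHelp(Options opt) {
    // printHelp(String cmdLineSyntax, Options options) renders each registered
    // option with its short name, long name and description.
    new HelpFormatter().printHelp(
      "RegionPlacementMaintainer [-w | -u | -n | -v | -p | -diff | -ld] [other options]", opt);
  }

  public static void main(String[] args) {
    Options opt = new Options();
    opt.addOption("w", "write", false, "write the assignments to hbase:meta only");
    opt.addOption("h", "help", false, "print usage");
    printHelp(opt);
  }
}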