Use of joptsimple.OptionSpec in project kafka by apache.
The class StreamsResetter, method parseArguments.
private void parseArguments(final String[] args) {
    final OptionParser optionParser = new OptionParser(false);
    applicationIdOption = optionParser.accepts("application-id", "The Kafka Streams application ID (application.id).")
        .withRequiredArg().ofType(String.class).describedAs("id").required();
    bootstrapServerOption = optionParser.accepts("bootstrap-servers", "Comma-separated list of broker urls with format: HOST1:PORT1,HOST2:PORT2")
        .withRequiredArg().ofType(String.class).defaultsTo("localhost:9092").describedAs("urls");
    inputTopicsOption = optionParser.accepts("input-topics", "Comma-separated list of user input topics. For these topics, the tool by default will reset the offset to the earliest available offset. "
        + "Reset to other offset position by appending other reset offset option, ex: --input-topics foo --shift-by 5")
        .withRequiredArg().ofType(String.class).withValuesSeparatedBy(',').describedAs("list");
    intermediateTopicsOption = optionParser.accepts("intermediate-topics", "Comma-separated list of intermediate user topics (topics that are input and output topics, "
        + "e.g., used in the deprecated through() method). For these topics, the tool will skip to the end.")
        .withRequiredArg().ofType(String.class).withValuesSeparatedBy(',').describedAs("list");
    internalTopicsOption = optionParser.accepts("internal-topics", "Comma-separated list of "
        + "internal topics to delete. Must be a subset of the internal topics marked for deletion by the "
        + "default behaviour (do a dry-run without this option to view these topics).")
        .withRequiredArg().ofType(String.class).withValuesSeparatedBy(',').describedAs("list");
    toOffsetOption = optionParser.accepts("to-offset", "Reset offsets to a specific offset.")
        .withRequiredArg().ofType(Long.class);
    toDatetimeOption = optionParser.accepts("to-datetime", "Reset offsets to offset from datetime. Format: 'YYYY-MM-DDTHH:mm:SS.sss'")
        .withRequiredArg().ofType(String.class);
    byDurationOption = optionParser.accepts("by-duration", "Reset offsets to offset by duration from current timestamp. Format: 'PnDTnHnMnS'")
        .withRequiredArg().ofType(String.class);
    toEarliestOption = optionParser.accepts("to-earliest", "Reset offsets to earliest offset.");
    toLatestOption = optionParser.accepts("to-latest", "Reset offsets to latest offset.");
    fromFileOption = optionParser.accepts("from-file", "Reset offsets to values defined in CSV file.")
        .withRequiredArg().ofType(String.class);
    shiftByOption = optionParser.accepts("shift-by", "Reset offsets shifting current offset by 'n', where 'n' can be positive or negative")
        .withRequiredArg().describedAs("number-of-offsets").ofType(Long.class);
    commandConfigOption = optionParser.accepts("config-file", "Property file containing configs to be passed to admin clients and embedded consumer.")
        .withRequiredArg().ofType(String.class).describedAs("file name");
    forceOption = optionParser.accepts("force", "Force the removal of members of the consumer group (intended to remove stopped members if a long session timeout was used). "
        + "Make sure to shut down all stream applications when this option is specified to avoid unexpected rebalances.");
    dryRunOption = optionParser.accepts("dry-run", "Display the actions that would be performed without executing the reset commands.");
    helpOption = optionParser.accepts("help", "Print usage information.").forHelp();
    versionOption = optionParser.accepts("version", "Print version information and exit.").forHelp();
    try {
        options = optionParser.parse(args);
        if (args.length == 0 || options.has(helpOption)) {
            CommandLineUtils.printUsageAndDie(optionParser, USAGE);
        }
        if (options.has(versionOption)) {
            CommandLineUtils.printVersionAndDie();
        }
    } catch (final OptionException e) {
        CommandLineUtils.printUsageAndDie(optionParser, e.getMessage());
    }
    final Set<OptionSpec<?>> allScenarioOptions = new HashSet<>();
    allScenarioOptions.add(toOffsetOption);
    allScenarioOptions.add(toDatetimeOption);
    allScenarioOptions.add(byDurationOption);
    allScenarioOptions.add(toEarliestOption);
    allScenarioOptions.add(toLatestOption);
    allScenarioOptions.add(fromFileOption);
    allScenarioOptions.add(shiftByOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, toOffsetOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, toDatetimeOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, byDurationOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, toEarliestOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, toLatestOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, fromFileOption);
    checkInvalidArgs(optionParser, options, allScenarioOptions, shiftByOption);
}
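The tail of this method is the interesting part: every reset-scenario spec is collected into a Set<OptionSpec<?>> and each one is checked against the rest, so at most one scenario option can appear on a command line. Kafka's checkInvalidArgs helper is called above but not shown; the following is a minimal, self-contained sketch of what such a mutual-exclusion check can look like. The class name, helper body, and error handling are assumptions based on how the method is called, not Kafka's actual implementation.

import java.util.HashSet;
import java.util.Set;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class ScenarioOptionsSketch {

    // Hypothetical equivalent of StreamsResetter#checkInvalidArgs: if usedOption
    // is present, fail when any other scenario option is also present. The parser
    // parameter mirrors the call shape above (Kafka uses it to print usage).
    private static void checkInvalidArgs(OptionParser parser, OptionSet options,
                                         Set<OptionSpec<?>> allScenarioOptions,
                                         OptionSpec<?> usedOption) {
        if (!options.has(usedOption)) {
            return;
        }
        for (OptionSpec<?> other : allScenarioOptions) {
            if (!other.equals(usedOption) && options.has(other)) {
                throw new IllegalArgumentException("Option " + usedOption.options()
                    + " cannot be used together with " + other.options());
            }
        }
    }

    public static void main(String[] args) {
        OptionParser parser = new OptionParser(false);
        OptionSpec<Long> toOffset = parser.accepts("to-offset").withRequiredArg().ofType(Long.class);
        OptionSpec<?> toEarliest = parser.accepts("to-earliest");
        OptionSpec<?> toLatest = parser.accepts("to-latest");
        OptionSet options = parser.parse(args);
        Set<OptionSpec<?>> scenarios = new HashSet<>();
        scenarios.add(toOffset);
        scenarios.add(toEarliest);
        scenarios.add(toLatest);
        // Validate each scenario option against all the others.
        for (OptionSpec<?> scenario : scenarios) {
            checkInvalidArgs(parser, options, scenarios, scenario);
        }
        System.out.println("scenario options are consistent");
    }
}

Running this with "--to-earliest --to-latest" throws, while either flag alone passes, which is the behavior the Kafka snippet enforces across its seven reset scenarios.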
Use of joptsimple.OptionSpec in project jackrabbit-oak by apache.
The class Explorer, method main.
public static void main(String[] args) throws IOException {
    OptionParser parser = new OptionParser();
    OptionSpec skipSizeCheck = parser.accepts("skip-size-check", "Don't compute the size of the records");
    OptionSpec<File> nonOptions = parser.nonOptions().ofType(File.class);
    OptionSet options = parser.parse(args);
    if (options.valuesOf(nonOptions).isEmpty()) {
        parser.printHelpOn(System.err);
        System.exit(1);
    }
    File path = options.valuesOf(nonOptions).get(0);
    ExplorerBackend backend = new SegmentTarExplorerBackend(path);
    new Explorer(path, backend, options.has(skipSizeCheck));
}
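The Explorer example leans on parser.nonOptions() to collect bare (non-option) arguments and convert them to File via ofType(). A small standalone sketch of that pattern follows; the option names and printed output are illustrative, not Oak's.

import java.io.File;
import java.io.IOException;
import java.util.List;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class NonOptionsSketch {
    public static void main(String[] args) throws IOException {
        OptionParser parser = new OptionParser();
        OptionSpec<?> verbose = parser.accepts("verbose", "Print extra detail");
        // nonOptions() captures bare arguments; ofType(File.class) converts each
        // one through File's single-String constructor.
        OptionSpec<File> files = parser.nonOptions("files to inspect").ofType(File.class);
        OptionSet options = parser.parse(args);
        List<File> paths = options.valuesOf(files);
        if (paths.isEmpty()) {
            parser.printHelpOn(System.err); // writes the option table; declares IOException
            System.exit(1);
        }
        for (File path : paths) {
            System.out.println(path.getAbsolutePath()
                + (options.has(verbose) ? " (exists: " + path.exists() + ")" : ""));
        }
    }
}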
Use of joptsimple.OptionSpec in project ambry by linkedin.
The class HelixBootstrapUpgradeTool, method main.
/**
 * @param args takes in three mandatory arguments: the hardware layout path, the partition layout path and the zk
 * layout path.
 * The Zk layout has to be of the following form:
 * {
 *   "zkInfo" : [
 *     {
 *       "datacenter":"dc1",
 *       "id": "1",
 *       "zkConnectStr":"abc.example.com:2199",
 *     },
 *     {
 *       "datacenter":"dc2",
 *       "id": "2",
 *       "zkConnectStr":"def.example.com:2300",
 *     }
 *   ]
 * }
 *
 * Also takes in an optional argument that specifies the local datacenter name, so that can be used as
 * the "reference" datacenter. If none provided, the tool simply chooses one of the datacenters in the
 * layout as the reference datacenter.
 */
public static void main(String[] args) throws Exception {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<String> hardwareLayoutPathOpt = parser.accepts("hardwareLayoutPath",
        "The path to the hardware layout json file")
        .withRequiredArg().describedAs("hardware_layout_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutPathOpt = parser.accepts("partitionLayoutPath",
        "The path to the partition layout json file")
        .withRequiredArg().describedAs("partition_layout_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> zkLayoutPathOpt = parser.accepts("zkLayoutPath",
        "The path to the json file containing zookeeper connect info. This should be of the following form: \n{\n"
        + " \"zkInfo\" : [\n"
        + " {\n"
        + " \"datacenter\":\"dc1\",\n"
        + " \"zkConnectStr\":\"abc.example.com:2199\",\n"
        + " },\n"
        + " {\n"
        + " \"datacenter\":\"dc2\",\n"
        + " \"zkConnectStr\":\"def.example.com:2300\",\n"
        + " },\n"
        + " {\n"
        + " \"datacenter\":\"dc3\",\n"
        + " \"zkConnectStr\":\"ghi.example.com:2400\",\n"
        + " }\n"
        + " ]\n"
        + "}")
        .withRequiredArg().describedAs("zk_connect_info_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> clusterNamePrefixOpt = parser.accepts("clusterNamePrefix",
        "The prefix for the cluster in Helix to bootstrap or upgrade")
        .withRequiredArg().describedAs("cluster_name_prefix").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> localDcOpt = parser.accepts("localDc",
        "(Optional argument) The local datacenter name")
        .withRequiredArg().describedAs("local_dc").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> maxPartitionsInOneResourceOpt = parser.accepts("maxPartitionsInOneResource",
        "(Optional argument) The maximum number of partitions that should be grouped under a Helix resource")
        .withRequiredArg().describedAs("max_partitions_in_one_resource").ofType(String.class);
    OptionSet options = parser.parse(args);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutPathOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutPathOpt);
    String zkLayoutPath = options.valueOf(zkLayoutPathOpt);
    String clusterNamePrefix = options.valueOf(clusterNamePrefixOpt);
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(hardwareLayoutPathOpt);
    listOpt.add(partitionLayoutPathOpt);
    listOpt.add(zkLayoutPathOpt);
    listOpt.add(clusterNamePrefixOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix,
        options.valueOf(localDcOpt),
        options.valueOf(maxPartitionsInOneResourceOpt) == null ? DEFAULT_MAX_PARTITIONS_PER_RESOURCE
            : Integer.valueOf(options.valueOf(maxPartitionsInOneResourceOpt)),
        new HelixAdminFactory());
}
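ToolUtils.ensureOrExit is Ambry's own utility, not part of joptsimple. The sketch below approximates its apparent contract from the call site above (every listed spec must be present, otherwise print usage and exit); the class name and method body are assumptions inferred from usage, not Ambry's code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class EnsureOrExitSketch {

    // Assumed behaviour of ToolUtils.ensureOrExit: every spec in the list must
    // appear on the command line, otherwise print the help table and exit.
    private static void ensureOrExit(List<OptionSpec<?>> required, OptionSet options, OptionParser parser)
            throws IOException {
        for (OptionSpec<?> spec : required) {
            if (!options.has(spec)) {
                System.err.println("Missing required argument " + spec.options());
                parser.printHelpOn(System.err);
                System.exit(1);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        OptionParser parser = new OptionParser();
        OptionSpec<String> hardwareLayoutPathOpt = parser.accepts("hardwareLayoutPath")
            .withRequiredArg().ofType(String.class);
        OptionSpec<String> partitionLayoutPathOpt = parser.accepts("partitionLayoutPath")
            .withRequiredArg().ofType(String.class);
        OptionSet options = parser.parse(args);
        List<OptionSpec<?>> listOpt = new ArrayList<>();
        listOpt.add(hardwareLayoutPathOpt);
        listOpt.add(partitionLayoutPathOpt);
        ensureOrExit(listOpt, options, parser);
        System.out.println(options.valueOf(hardwareLayoutPathOpt) + " / "
            + options.valueOf(partitionLayoutPathOpt));
    }
}

This accumulate-then-validate style is an alternative to joptsimple's built-in required(): it lets a tool decide at runtime which options are mandatory instead of fixing that at parser-construction time.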
Use of joptsimple.OptionSpec in project ambry by linkedin.
The class PartitionManager, method main.
public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> operationTypeOpt = parser.accepts("operationType",
            " REQUIRED: The type of operation to perform on the partition. Currently supported"
            + " operations are 'AddPartition', 'AddReplicas'")
            .withRequiredArg().describedAs("operation_type").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutPathOpt = parser.accepts("hardwareLayoutPath",
            " REQUIRED: The path to the hardware layout map")
            .withRequiredArg().describedAs("hardware_layout_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutPathOpt = parser.accepts("partitionLayoutPath",
            "REQUIRED: The path to the partition layout map. If outputPartitionLayoutPath is not defined,"
            + "this file is updated with the new partitions")
            .withRequiredArg().describedAs("partition_layout_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> outputPartitionLayoutPathOpt = parser.accepts("outputPartitionLayoutPath",
            "The path to the output partition layout map. The file is updated with the new partitions")
            .withOptionalArg().describedAs("output_partition_layout_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfPartitionsOpt = parser.accepts("numberOfPartitionsToAdd",
            "The number of partitions to add")
            .withOptionalArg().ofType(Integer.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfReplicasPerDatacenterOpt = parser.accepts("numberOfReplicasPerDatacenter",
            "The number of replicas for the partition per datacenter when adding partitions")
            .withOptionalArg().ofType(Integer.class);
        ArgumentAcceptingOptionSpec<Long> replicaCapacityInBytesOpt = parser.accepts("replicaCapacityInBytes",
            "The capacity of each replica in bytes for the partitions to add")
            .withOptionalArg().ofType(Long.class);
        ArgumentAcceptingOptionSpec<String> partitionIdsToAddReplicasToOpt = parser.accepts("partitionIdToAddReplicasTo",
            "The partitionIds to add replicas to. This can either take a "
            + "comma separated list of partitions to add replicas to or '.' to add replicas to all partitions in "
            + "the partitionLayout ")
            .withOptionalArg().ofType(String.class);
        ArgumentAcceptingOptionSpec<String> datacenterToAddReplicasToOpt = parser.accepts("datacenterToAddReplicasTo",
            "The data center to which replicas need to be added to")
            .withOptionalArg().ofType(String.class);
        String attemptNonRackAwareOnFailureFlag = "attemptNonRackAwareOnFailure";
        parser.accepts(attemptNonRackAwareOnFailureFlag,
            "If a rack-aware partition allocation cannot be found, attempt a non rack-aware one");
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<OptionSpec>();
        listOpt.add(hardwareLayoutPathOpt);
        listOpt.add(operationTypeOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutPathOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutPathOpt);
        String outputPartitionLayoutPath = options.has(outputPartitionLayoutPathOpt)
            ? options.valueOf(outputPartitionLayoutPathOpt) : partitionLayoutPath;
        String operationType = options.valueOf(operationTypeOpt);
        boolean attemptNonRackAwareOnFailure = options.has(attemptNonRackAwareOnFailureFlag);
        String fileString = null;
        try {
            fileString = Utils.readStringFromFile(partitionLayoutPath);
        } catch (FileNotFoundException e) {
            System.out.println("Partition layout path not found. Creating new file");
        }
        StaticClusterManager manager = null;
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        if (fileString == null) {
            manager = (new StaticClusterAgentsFactory(clusterMapConfig,
                new PartitionLayout(new HardwareLayout(new JSONObject(Utils.readStringFromFile(hardwareLayoutPath)),
                    clusterMapConfig)))).getClusterMap();
        } else {
            manager = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        }
        if (operationType.compareToIgnoreCase("AddPartition") == 0) {
            listOpt.add(numberOfPartitionsOpt);
            listOpt.add(numberOfReplicasPerDatacenterOpt);
            listOpt.add(replicaCapacityInBytesOpt);
            ToolUtils.ensureOrExit(listOpt, options, parser);
            int numberOfPartitions = options.valueOf(numberOfPartitionsOpt);
            int numberOfReplicas = options.valueOf(numberOfReplicasPerDatacenterOpt);
            long replicaCapacityInBytes = options.valueOf(replicaCapacityInBytesOpt);
            manager.allocatePartitions(numberOfPartitions, numberOfReplicas, replicaCapacityInBytes, attemptNonRackAwareOnFailure);
        } else if (operationType.compareToIgnoreCase("AddReplicas") == 0) {
            listOpt.add(partitionIdsToAddReplicasToOpt);
            listOpt.add(datacenterToAddReplicasToOpt);
            listOpt.add(partitionLayoutPathOpt);
            ToolUtils.ensureOrExit(listOpt, options, parser);
            String partitionIdsToAddReplicas = options.valueOf(partitionIdsToAddReplicasToOpt);
            String datacenterToAddReplicasTo = options.valueOf(datacenterToAddReplicasToOpt);
            if (partitionIdsToAddReplicas.compareToIgnoreCase(".") == 0) {
                for (PartitionId partitionId : manager.getAllPartitionIds()) {
                    manager.addReplicas(partitionId, datacenterToAddReplicasTo, attemptNonRackAwareOnFailure);
                }
            } else {
                String[] partitionIds = partitionIdsToAddReplicas.split(",");
                for (String partitionId : partitionIds) {
                    for (PartitionId partitionInCluster : manager.getAllPartitionIds()) {
                        if (partitionInCluster.isEqual(partitionId)) {
                            manager.addReplicas(partitionInCluster, datacenterToAddReplicasTo, attemptNonRackAwareOnFailure);
                        }
                    }
                }
            }
        }
        manager.persist(hardwareLayoutPath, outputPartitionLayoutPath);
    } catch (Exception e) {
        System.out.println("Error while executing partition command " + e);
    }
}
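Worth isolating here is the two-pass validation: a base set of options is required up front, and the required list grows once the operation type is known. Below is a self-contained sketch of that staged-validation idea; requireAll is a hypothetical stand-in for ToolUtils.ensureOrExit, and the behavior on a missing option is an assumption.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class StagedValidationSketch {

    // Hypothetical stand-in for ToolUtils.ensureOrExit.
    private static void requireAll(List<OptionSpec<?>> required, OptionSet options, OptionParser parser)
            throws IOException {
        for (OptionSpec<?> spec : required) {
            if (!options.has(spec)) {
                parser.printHelpOn(System.err);
                System.exit(1);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        OptionParser parser = new OptionParser();
        OptionSpec<String> operationTypeOpt = parser.accepts("operationType")
            .withRequiredArg().ofType(String.class);
        OptionSpec<Integer> numberOfPartitionsOpt = parser.accepts("numberOfPartitionsToAdd")
            .withOptionalArg().ofType(Integer.class);
        OptionSpec<String> partitionIdsOpt = parser.accepts("partitionIdToAddReplicasTo")
            .withOptionalArg().ofType(String.class);
        OptionSet options = parser.parse(args);

        // First pass: only the operation type is unconditionally required.
        List<OptionSpec<?>> required = new ArrayList<>();
        required.add(operationTypeOpt);
        requireAll(required, options, parser);

        // Second pass: grow the required list once the operation is known, then re-validate.
        String operationType = options.valueOf(operationTypeOpt);
        if ("AddPartition".equalsIgnoreCase(operationType)) {
            required.add(numberOfPartitionsOpt);
        } else if ("AddReplicas".equalsIgnoreCase(operationType)) {
            required.add(partitionIdsOpt);
        }
        requireAll(required, options, parser);
        System.out.println("arguments accepted for " + operationType);
    }
}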
Use of joptsimple.OptionSpec in project ambry by linkedin.
The class IndexReadPerformance, method main.
public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead",
            "The log that needs to be replayed for traffic")
            .withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout",
            "The path of the hardware layout file")
            .withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout",
            "The path of the partition layout file")
            .withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfReadersOpt = parser.accepts("numberOfReaders",
            "The number of readers that read to a random index concurrently")
            .withRequiredArg().describedAs("The number of readers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond",
            "The rate at which reads need to be performed")
            .withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging",
            "Enables verbose logging")
            .withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        String logToRead = options.valueOf(logToReadOpt);
        int numberOfReaders = options.valueOf(numberOfReadersOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
            clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        StoreKeyFactory factory = new BlobIdFactory(map);
        // Read the log and get the index directories and create the indexes
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        final HashMap<String, IndexPayload> hashes = new HashMap<String, IndexPayload>();
        String line;
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics metrics = new StoreMetrics(metricRegistry);
        ScheduledExecutorService s = Utils.newScheduler(numberOfReaders, "index", true);
        DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
        Log log = new Log(System.getProperty("user.dir"), 1000, 1000, diskSpaceAllocator, metrics);
        Properties props = new Properties();
        props.setProperty("store.index.memory.size.bytes", "1048576");
        StoreConfig config = new StoreConfig(new VerifiableProperties(props));
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final CountDownLatch latch = new CountDownLatch(numberOfReaders);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total reads : " + totalReads.get()
                        + " Total time taken : " + totalTimeTaken.get()
                        + " Nano Seconds Average time taken per read "
                        + ((double) totalReads.get() / totalTimeTaken.get()) / SystemTime.NsPerSec + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        ScheduledExecutorService scheduleReadLog = Utils.newScheduler(1, true);
        while ((line = br.readLine()) != null) {
            if (line.startsWith("logdir")) {
                String[] logdirs = line.split("-");
                BlobIndexMetrics metricIndex = new BlobIndexMetrics(logdirs[1], s, log, enableVerboseLogging,
                    totalReads, totalTimeTaken, totalReads, config, null, factory);
                hashes.put(logdirs[1], new IndexPayload(metricIndex, new HashSet<String>()));
            } else {
                break;
            }
        }
        // read next batch of ids after 2 minutes
        scheduleReadLog.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                populateIds(br, hashes);
            }
        }, 0, 120, TimeUnit.SECONDS);
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfReaders];
        for (int i = 0; i < numberOfReaders; i++) {
            threadIndexPerf[i] = new Thread(new IndexReadPerfRun(hashes, throttler, shutdown, latch, map));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfReaders; i++) {
            threadIndexPerf[i].join();
        }
        br.close();
    } catch (Exception e) {
        System.out.println("Exiting process with exception " + e);
    }
}
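One subtlety in this example: defaultsTo() only affects what valueOf() returns when the option is absent, while has() reports whether the flag appeared at all, which is why the code tests options.has(verboseLoggingOpt) rather than reading the Boolean value. A minimal sketch of that distinction (option names borrowed from the example above; the printed output is illustrative):

import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class DefaultsSketch {
    public static void main(String[] args) {
        OptionParser parser = new OptionParser();
        OptionSpec<Integer> readersOpt = parser.accepts("numberOfReaders")
            .withRequiredArg().ofType(Integer.class).defaultsTo(4);
        OptionSpec<Boolean> verboseOpt = parser.accepts("enableVerboseLogging")
            .withOptionalArg().ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        // valueOf() falls back to the defaultsTo() value when the option is absent,
        // so this never returns null.
        int numberOfReaders = options.valueOf(readersOpt);
        // has() only reports whether the flag appeared at all, regardless of defaults,
        // so "--enableVerboseLogging" with no argument is enough to turn it on.
        boolean verbose = options.has(verboseOpt);
        System.out.println("readers=" + numberOfReaders + ", verbose=" + verbose);
    }
}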