Use of net.sourceforge.argparse4j.inf.ArgumentParser in project helios by Spotify.
The class JobListCommandTest, method setUp.
@Before
public void setUp() {
  // use a real, dummy Subparser impl to avoid having to mock out every single call
  final ArgumentParser parser = ArgumentParsers.newArgumentParser("test");
  final Subparser subparser = parser.addSubparsers().addParser("list");
  command = new JobListCommand(subparser);
  when(client.jobs()).thenReturn(Futures.immediateFuture(jobs));
  final Map<JobId, JobStatus> statuses = new HashMap<>();
  for (final JobId jobId : jobs.keySet()) {
    // pretend each job is deployed
    final JobStatus status = JobStatus.newBuilder()
        .setDeployments(ImmutableMap.of("host", Deployment.of(jobId, Goal.START)))
        .build();
    statuses.put(jobId, status);
  }
  when(client.jobStatuses(jobs.keySet())).thenReturn(Futures.immediateFuture(statuses));
}
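For readers less familiar with argparse4j: addSubparsers() registers a set of subcommands on the parser, and each addParser(...) call returns a Subparser that a command class (here JobListCommand) populates with its own arguments. Below is a minimal, self-contained sketch of that pattern; the program name, the --quiet flag, and the printed output are invented for illustration, not taken from helios.

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.impl.Arguments;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.Namespace;
import net.sourceforge.argparse4j.inf.Subparser;

public class SubparserSketch {
  public static void main(String[] args) {
    ArgumentParser parser = ArgumentParsers.newArgumentParser("cli");
    // Each subcommand gets its own Subparser, just as JobListCommand does above.
    Subparser list = parser.addSubparsers().dest("command").addParser("list");
    list.addArgument("--quiet").action(Arguments.storeTrue()).help("only print ids");
    try {
      Namespace ns = parser.parseArgs(args);
      System.out.println("command=" + ns.getString("command") + " quiet=" + ns.getBoolean("quiet"));
    } catch (ArgumentParserException e) {
      parser.handleError(e);
    }
  }
}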
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project kafka by Apache.
The class ClientCompatibilityTest, method main.
public static void main(String[] args) throws Exception {
    ArgumentParser parser = ArgumentParsers.newArgumentParser("client-compatibility-test")
        .defaultHelp(true)
        .description("This tool is used to verify client compatibility guarantees.");
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .dest("topic")
        .metavar("TOPIC")
        .help("the compatibility test will produce messages to this topic");
    parser.addArgument("--bootstrap-server")
        .action(store())
        .required(true)
        .type(String.class)
        .dest("bootstrapServer")
        .metavar("BOOTSTRAP_SERVER")
        .help("The server(s) to use for bootstrapping");
    parser.addArgument("--offsets-for-times-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("offsetsForTimesSupported")
        .metavar("OFFSETS_FOR_TIMES_SUPPORTED")
        .help("True if KafkaConsumer#offsetsForTimes is supported by the current broker version");
    parser.addArgument("--cluster-id-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("clusterIdSupported")
        .metavar("CLUSTER_ID_SUPPORTED")
        .help("True if cluster IDs are supported. False if cluster ID always appears as null.");
    parser.addArgument("--expect-record-too-large-exception")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("expectRecordTooLargeException")
        .metavar("EXPECT_RECORD_TOO_LARGE_EXCEPTION")
        .help("True if we should expect a RecordTooLargeException when trying to read from a topic "
            + "that contains a message that is bigger than "
            + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG
            + ". This is pre-KIP-74 behavior.");
    Namespace res = null;
    try {
        res = parser.parseArgs(args);
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            Exit.exit(0);
        } else {
            parser.handleError(e);
            Exit.exit(1);
        }
    }
    TestConfig testConfig = new TestConfig(res);
    ClientCompatibilityTest test = new ClientCompatibilityTest(testConfig);
    try {
        test.run();
    } catch (Throwable t) {
        System.out.printf("FAILED: Caught exception %s%n%n", t.getMessage());
        t.printStackTrace();
        Exit.exit(1);
    }
    System.out.println("SUCCESS.");
    Exit.exit(0);
}
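The try/catch around parseArgs is a recurring idiom in Kafka's command-line tools: invoking the tool with no arguments prints the help text and exits 0, while a malformed argument prints the error and exits 1. Here is a sketch of that idiom extracted into a reusable helper; the class and method names are hypothetical, and plain System.exit stands in for Kafka's Exit wrapper.

import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.Namespace;

final class CliParsing {
    // Parse args, or print help/error and exit, mirroring the pattern above.
    static Namespace parseOrExit(ArgumentParser parser, String[] args) {
        try {
            return parser.parseArgs(args);
        } catch (ArgumentParserException e) {
            if (args.length == 0) {
                parser.printHelp();     // bare invocation: show usage, exit cleanly
                System.exit(0);
            } else {
                parser.handleError(e);  // bad flag or value: show error, fail
                System.exit(1);
            }
            return null;                // unreachable
        }
    }
}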
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project kafka by Apache.
The class ProducerPerformance, method main.
public static void main(String[] args) throws Exception {
    ArgumentParser parser = argParser();
    try {
        Namespace res = parser.parseArgs(args);
        /* parse args */
        String topicName = res.getString("topic");
        long numRecords = res.getLong("numRecords");
        Integer recordSize = res.getInt("recordSize");
        int throughput = res.getInt("throughput");
        List<String> producerProps = res.getList("producerConfig");
        String producerConfig = res.getString("producerConfigFile");
        String payloadFilePath = res.getString("payloadFile");
        // Since the default value gets printed with the help text, "\n" is kept escaped
        // there and replaced with a real newline here.
        String payloadDelimiter = res.getString("payloadDelimiter").equals("\\n")
            ? "\n"
            : res.getString("payloadDelimiter");
        if (producerProps == null && producerConfig == null) {
            throw new ArgumentParserException("Either --producer-props or --producer.config must be specified.", parser);
        }
        List<byte[]> payloadByteList = new ArrayList<>();
        if (payloadFilePath != null) {
            Path path = Paths.get(payloadFilePath);
            System.out.println("Reading payloads from: " + path.toAbsolutePath());
            if (Files.notExists(path) || Files.size(path) == 0) {
                throw new IllegalArgumentException("File does not exist or empty file provided.");
            }
            String[] payloadList = new String(Files.readAllBytes(path), "UTF-8").split(payloadDelimiter);
            System.out.println("Number of messages read: " + payloadList.length);
            for (String payload : payloadList) {
                payloadByteList.add(payload.getBytes(StandardCharsets.UTF_8));
            }
        }
        Properties props = new Properties();
        if (producerConfig != null) {
            props.putAll(Utils.loadProps(producerConfig));
        }
        if (producerProps != null) {
            for (String prop : producerProps) {
                String[] pieces = prop.split("=");
                if (pieces.length != 2) {
                    throw new IllegalArgumentException("Invalid property: " + prop);
                }
                props.put(pieces[0], pieces[1]);
            }
        }
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
        /* setup perf test */
        byte[] payload = null;
        Random random = new Random(0);
        if (recordSize != null) {
            payload = new byte[recordSize];
            for (int i = 0; i < payload.length; ++i) {
                payload[i] = (byte) (random.nextInt(26) + 65);
            }
        }
        ProducerRecord<byte[], byte[]> record;
        Stats stats = new Stats(numRecords, 5000);
        long startMs = System.currentTimeMillis();
        ThroughputThrottler throttler = new ThroughputThrottler(throughput, startMs);
        for (int i = 0; i < numRecords; i++) {
            if (payloadFilePath != null) {
                payload = payloadByteList.get(random.nextInt(payloadByteList.size()));
            }
            record = new ProducerRecord<>(topicName, payload);
            long sendStartMs = System.currentTimeMillis();
            Callback cb = stats.nextCompletion(sendStartMs, payload.length, stats);
            producer.send(record, cb);
            if (throttler.shouldThrottle(i, sendStartMs)) {
                throttler.throttle();
            }
        }
        /* print final results */
        producer.close();
        stats.printTotal();
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            Exit.exit(0);
        } else {
            parser.handleError(e);
            Exit.exit(1);
        }
    }
}
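One subtlety in the --producer-props handling above: prop.split("=") rejects any property whose value itself contains an equals sign (for example a sasl.jaas.config value). A sketch of a more tolerant variant that splits only on the first '='; the PropParsing class and putProp helper are invented for illustration, not part of the Kafka tool.

import java.util.Properties;

final class PropParsing {
    // Split "key=value" on the first '=' only, so values may themselves contain '='.
    static void putProp(Properties props, String prop) {
        int idx = prop.indexOf('=');
        if (idx <= 0) {
            throw new IllegalArgumentException("Invalid property: " + prop);
        }
        props.put(prop.substring(0, idx), prop.substring(idx + 1));
    }
}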
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project kafka by Apache.
The class ProducerPerformance, method argParser.
/** Get the command-line argument parser. */
private static ArgumentParser argParser() {
    ArgumentParser parser = ArgumentParsers.newArgumentParser("producer-performance")
        .defaultHelp(true)
        .description("This tool is used to verify the producer performance.");
    MutuallyExclusiveGroup payloadOptions = parser.addMutuallyExclusiveGroup()
        .required(true)
        .description("either --record-size or --payload-file must be specified but not both.");
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("TOPIC")
        .help("produce messages to this topic");
    parser.addArgument("--num-records")
        .action(store())
        .required(true)
        .type(Long.class)
        .metavar("NUM-RECORDS")
        .dest("numRecords")
        .help("number of messages to produce");
    payloadOptions.addArgument("--record-size")
        .action(store())
        .required(false)
        .type(Integer.class)
        .metavar("RECORD-SIZE")
        .dest("recordSize")
        .help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file.");
    payloadOptions.addArgument("--payload-file")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-FILE")
        .dest("payloadFile")
        .help("file to read the message payloads from. This works only for UTF-8 encoded text files. "
            + "Payloads will be read from this file and a payload will be randomly selected when sending messages. "
            + "Note that you must provide exactly one of --record-size or --payload-file.");
    parser.addArgument("--payload-delimiter")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-DELIMITER")
        .dest("payloadDelimiter")
        .setDefault("\\n")
        .help("provides delimiter to be used when --payload-file is provided. "
            + "Defaults to new line. "
            + "Note that this parameter will be ignored if --payload-file is not provided.");
    parser.addArgument("--throughput")
        .action(store())
        .required(true)
        .type(Integer.class)
        .metavar("THROUGHPUT")
        .help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec");
    parser.addArgument("--producer-props")
        .nargs("+")
        .required(false)
        .metavar("PROP-NAME=PROP-VALUE")
        .type(String.class)
        .dest("producerConfig")
        .help("kafka producer related configuration properties like bootstrap.servers,client.id etc. "
            + "These configs take precedence over those passed via --producer.config.");
    parser.addArgument("--producer.config")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("CONFIG-FILE")
        .dest("producerConfigFile")
        .help("producer config properties file.");
    return parser;
}
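Note how "exactly one of --record-size or --payload-file" is enforced: the group itself is required(true), while each member argument is required(false), so argparse4j demands one member and rejects two. A minimal sketch of that behavior in isolation; the program and flag names are invented for illustration.

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.MutuallyExclusiveGroup;

public class ExclusiveGroupSketch {
    public static void main(String[] args) throws ArgumentParserException {
        ArgumentParser parser = ArgumentParsers.newArgumentParser("sketch");
        // required(true) on the group means exactly one member must be given.
        MutuallyExclusiveGroup group = parser.addMutuallyExclusiveGroup().required(true);
        group.addArgument("--size").type(Integer.class);
        group.addArgument("--file").type(String.class);
        parser.parseArgs(new String[] {"--size", "100"});  // accepted
        try {
            parser.parseArgs(new String[] {"--size", "1", "--file", "f"});
        } catch (ArgumentParserException e) {
            System.out.println("rejected as expected: " + e.getMessage());
        }
    }
}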
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project kafka by Apache.
The class VerifiableConsumer, method argParser.
private static ArgumentParser argParser() {
    ArgumentParser parser = ArgumentParsers.newArgumentParser("verifiable-consumer")
        .defaultHelp(true)
        .description("This tool consumes messages from a specific topic and emits consumer events "
            + "(e.g. group rebalances, received messages, and offsets committed) as JSON objects to STDOUT.");
    parser.addArgument("--broker-list")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("HOST1:PORT1[,HOST2:PORT2[...]]")
        .dest("brokerList")
        .help("Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...");
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("TOPIC")
        .help("Consumes messages from this topic.");
    parser.addArgument("--group-id")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("GROUP_ID")
        .dest("groupId")
        .help("The groupId shared among members of the consumer group");
    parser.addArgument("--max-messages")
        .action(store())
        .required(false)
        .type(Integer.class)
        .setDefault(-1)
        .metavar("MAX-MESSAGES")
        .dest("maxMessages")
        .help("Consume this many messages. If -1 (the default), the consumer will consume until the process is killed externally");
    parser.addArgument("--session-timeout")
        .action(store())
        .required(false)
        .setDefault(30000)
        .type(Integer.class)
        .metavar("TIMEOUT_MS")
        .dest("sessionTimeout")
        .help("Set the consumer's session timeout");
    parser.addArgument("--verbose")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("VERBOSE")
        .help("Enable to log individual consumed records");
    parser.addArgument("--enable-autocommit")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("ENABLE-AUTOCOMMIT")
        .dest("useAutoCommit")
        .help("Enable offset auto-commit on consumer");
    parser.addArgument("--reset-policy")
        .action(store())
        .required(false)
        .setDefault("earliest")
        .type(String.class)
        .dest("resetPolicy")
        .help("Set reset policy (must be either 'earliest', 'latest', or 'none')");
    parser.addArgument("--assignment-strategy")
        .action(store())
        .required(false)
        .setDefault(RangeAssignor.class.getName())
        .type(String.class)
        .dest("assignmentStrategy")
        .help("Set assignment strategy (e.g. " + RoundRobinAssignor.class.getName() + ")");
    parser.addArgument("--consumer.config")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("CONFIG_FILE")
        .help("Consumer config properties file (config options shared with command line parameters will be overridden).");
    return parser;
}
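Across all of these parsers, the dest(...) value is the key used to read a parsed value back out of the Namespace (when dest is omitted, argparse4j derives it from the first long option name, stripping the leading dashes and mapping any remaining dashes to underscores). A sketch of reading the options defined above, assuming the argParser() factory from VerifiableConsumer is visible from the calling class; the argument values are invented for illustration.

import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.Namespace;

public class ReadNamespaceSketch {
    public static void main(String[] args) throws Exception {
        ArgumentParser parser = argParser();  // the VerifiableConsumer parser above
        Namespace ns = parser.parseArgs(new String[] {
            "--broker-list", "localhost:9092", "--topic", "t", "--group-id", "g"});
        String brokers = ns.getString("brokerList");  // key comes from dest("brokerList")
        String topic = ns.getString("topic");         // derived dest: "topic"
        int maxMessages = ns.getInt("maxMessages");   // -1, via setDefault(-1)
        boolean verbose = ns.getBoolean("verbose");   // false, the storeTrue() default
        System.out.printf("%s %s %d %b%n", brokers, topic, maxMessages, verbose);
    }
}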