Use of net.sourceforge.argparse4j.inf.ArgumentParser in project Kafka by Apache.
From class ClientCompatibilityTest, method main:
public static void main(String[] args) throws Exception {
    ArgumentParser parser = ArgumentParsers
        .newArgumentParser("client-compatibility-test")
        .defaultHelp(true)
        .description("This tool is used to verify client compatibility guarantees.");
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .dest("topic")
        .metavar("TOPIC")
        .help("the compatibility test will produce messages to this topic");
    parser.addArgument("--bootstrap-server")
        .action(store())
        .required(true)
        .type(String.class)
        .dest("bootstrapServer")
        .metavar("BOOTSTRAP_SERVER")
        .help("The server(s) to use for bootstrapping");
    parser.addArgument("--offsets-for-times-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("offsetsForTimesSupported")
        .metavar("OFFSETS_FOR_TIMES_SUPPORTED")
        .help("True if KafkaConsumer#offsetsForTimes is supported by the current broker version");
    parser.addArgument("--cluster-id-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("clusterIdSupported")
        .metavar("CLUSTER_ID_SUPPORTED")
        .help("True if cluster IDs are supported. False if cluster ID always appears as null.");
    parser.addArgument("--expect-record-too-large-exception")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("expectRecordTooLargeException")
        .metavar("EXPECT_RECORD_TOO_LARGE_EXCEPTION")
        .help("True if we should expect a RecordTooLargeException when trying to read from a topic " +
            "that contains a message that is bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG +
            ". This is pre-KIP-74 behavior.");
    parser.addArgument("--num-cluster-nodes")
        .action(store())
        .required(true)
        .type(Integer.class)
        .dest("numClusterNodes")
        .metavar("NUM_CLUSTER_NODES")
        .help("The number of cluster nodes we should expect to see from the AdminClient.");
    parser.addArgument("--create-topics-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("createTopicsSupported")
        .metavar("CREATE_TOPICS_SUPPORTED")
        .help("Whether we should be able to create topics via the AdminClient.");
    parser.addArgument("--describe-acls-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("describeAclsSupported")
        .metavar("DESCRIBE_ACLS_SUPPORTED")
        .help("Whether describeAcls is supported in the AdminClient.");
    parser.addArgument("--describe-configs-supported")
        .action(store())
        .required(true)
        .type(Boolean.class)
        .dest("describeConfigsSupported")
        .metavar("DESCRIBE_CONFIGS_SUPPORTED")
        .help("Whether describeConfigs is supported in the AdminClient.");
    Namespace res = null;
    try {
        res = parser.parseArgs(args);
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            Exit.exit(0);
        } else {
            parser.handleError(e);
            Exit.exit(1);
        }
    }
    TestConfig testConfig = new TestConfig(res);
    ClientCompatibilityTest test = new ClientCompatibilityTest(testConfig);
    try {
        test.run();
    } catch (Throwable t) {
        System.out.printf("FAILED: Caught exception %s%n%n", t.getMessage());
        t.printStackTrace();
        Exit.exit(1);
    }
    System.out.println("SUCCESS.");
    Exit.exit(0);
}
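Each of these call sites follows the same argparse4j pattern: build an ArgumentParser, chain addArgument(...) calls, parse the arguments into a Namespace, and handle ArgumentParserException. A minimal, self-contained sketch of the pattern follows; the program name and options here are illustrative, not taken from Kafka.

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.Namespace;

import static net.sourceforge.argparse4j.impl.Arguments.store;

public class ArgParserSketch {
    public static void main(String[] args) {
        ArgumentParser parser = ArgumentParsers
            .newArgumentParser("arg-parser-sketch")
            .defaultHelp(true)
            .description("Illustrative argparse4j usage.");
        parser.addArgument("--topic")
            .action(store())
            .required(true)
            .type(String.class)
            .dest("topic")
            .metavar("TOPIC")
            .help("A required string option.");
        parser.addArgument("--enabled")
            .action(store())
            .type(Boolean.class)
            .setDefault(false)
            .dest("enabled")
            .help("A Boolean-typed option parsed from 'true'/'false'.");
        try {
            // Values land in the Namespace under their dest(...) keys.
            Namespace res = parser.parseArgs(args);
            System.out.println("topic=" + res.getString("topic") + ", enabled=" + res.getBoolean("enabled"));
        } catch (ArgumentParserException e) {
            parser.handleError(e);
            System.exit(1);
        }
    }
}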
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project Kafka by Apache.
From class ProducerPerformance, method argParser:
/**
* Get the command-line argument parser.
*/
static ArgumentParser argParser() {
    ArgumentParser parser = ArgumentParsers
        .newArgumentParser("producer-performance")
        .defaultHelp(true)
        .description("This tool is used to verify the producer performance.");
    MutuallyExclusiveGroup payloadOptions = parser.addMutuallyExclusiveGroup()
        .required(true)
        .description("either --record-size or --payload-file must be specified but not both.");
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("TOPIC")
        .help("produce messages to this topic");
    parser.addArgument("--num-records")
        .action(store())
        .required(true)
        .type(Long.class)
        .metavar("NUM-RECORDS")
        .dest("numRecords")
        .help("number of messages to produce");
    payloadOptions.addArgument("--record-size")
        .action(store())
        .required(false)
        .type(Integer.class)
        .metavar("RECORD-SIZE")
        .dest("recordSize")
        .help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file.");
    payloadOptions.addArgument("--payload-file")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-FILE")
        .dest("payloadFile")
        .help("file to read the message payloads from. This works only for UTF-8 encoded text files. " +
            "Payloads will be read from this file and a payload will be randomly selected when sending messages. " +
            "Note that you must provide exactly one of --record-size or --payload-file.");
    parser.addArgument("--payload-delimiter")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-DELIMITER")
        .dest("payloadDelimiter")
        .setDefault("\\n")
        .help("provides delimiter to be used when --payload-file is provided. " +
            "Defaults to new line. " +
            "Note that this parameter will be ignored if --payload-file is not provided.");
    parser.addArgument("--throughput")
        .action(store())
        .required(true)
        .type(Integer.class)
        .metavar("THROUGHPUT")
        .help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.");
    parser.addArgument("--producer-props")
        .nargs("+")
        .required(false)
        .metavar("PROP-NAME=PROP-VALUE")
        .type(String.class)
        .dest("producerConfig")
        .help("kafka producer related configuration properties like bootstrap.servers, client.id etc. " +
            "These configs take precedence over those passed via --producer.config.");
    parser.addArgument("--producer.config")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("CONFIG-FILE")
        .dest("producerConfigFile")
        .help("producer config properties file.");
    parser.addArgument("--print-metrics")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("PRINT-METRICS")
        .dest("printMetrics")
        .help("print out metrics at the end of the test.");
    parser.addArgument("--transactional-id")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("TRANSACTIONAL-ID")
        .dest("transactionalId")
        .setDefault("performance-producer-default-transactional-id")
        .help("The transactionalId to use if transaction-duration-ms is > 0. Useful when testing the performance of concurrent transactions.");
    parser.addArgument("--transaction-duration-ms")
        .action(store())
        .required(false)
        .type(Long.class)
        .metavar("TRANSACTION-DURATION")
        .dest("transactionDurationMs")
        .setDefault(0L)
        .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. Transactions are only enabled if this value is positive.");
    return parser;
}
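The MutuallyExclusiveGroup above is what enforces "exactly one of --record-size or --payload-file": required(true) on the group means at least one member must be present, and the group itself rejects invocations that supply more than one. A small sketch of that behavior, reusing the two option names purely for illustration:

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.MutuallyExclusiveGroup;

public class MutuallyExclusiveSketch {
    public static void main(String[] args) throws ArgumentParserException {
        ArgumentParser parser = ArgumentParsers.newArgumentParser("mex-sketch").defaultHelp(true);
        MutuallyExclusiveGroup group = parser.addMutuallyExclusiveGroup()
            .required(true)
            .description("either --record-size or --payload-file, not both");
        group.addArgument("--record-size").type(Integer.class).dest("recordSize");
        group.addArgument("--payload-file").type(String.class).dest("payloadFile");

        // Exactly one member supplied: parses fine.
        System.out.println(parser.parseArgs(new String[]{"--record-size", "100"}).getInt("recordSize"));

        // Both members supplied (or neither): parseArgs throws ArgumentParserException.
        try {
            parser.parseArgs(new String[]{"--record-size", "100", "--payload-file", "p.txt"});
        } catch (ArgumentParserException e) {
            parser.handleError(e); // reports that the two options are mutually exclusive
        }
    }
}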
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project Kafka by Apache.
From class VerifiableConsumer, method argParser:
private static ArgumentParser argParser() {
    ArgumentParser parser = ArgumentParsers
        .newArgumentParser("verifiable-consumer")
        .defaultHelp(true)
        .description("This tool consumes messages from a specific topic and emits consumer events (e.g. group rebalances, received messages, and offsets committed) as JSON objects to STDOUT.");
    MutuallyExclusiveGroup connectionGroup = parser.addMutuallyExclusiveGroup("Connection Group")
        .description("Group of arguments for connection to brokers")
        .required(true);
    connectionGroup.addArgument("--bootstrap-server")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("HOST1:PORT1[,HOST2:PORT2[...]]")
        .dest("bootstrapServer")
        .help("REQUIRED unless --broker-list (deprecated) is specified. The server(s) to connect to. Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...");
    connectionGroup.addArgument("--broker-list")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("HOST1:PORT1[,HOST2:PORT2[...]]")
        .dest("brokerList")
        .help("DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap-server is specified. Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...");
    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("TOPIC")
        .help("Consumes messages from this topic.");
    parser.addArgument("--group-id")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("GROUP_ID")
        .dest("groupId")
        .help("The groupId shared among members of the consumer group");
    parser.addArgument("--group-instance-id")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("GROUP_INSTANCE_ID")
        .dest("groupInstanceId")
        .help("A unique identifier of the consumer instance");
    parser.addArgument("--max-messages")
        .action(store())
        .required(false)
        .type(Integer.class)
        .setDefault(-1)
        .metavar("MAX-MESSAGES")
        .dest("maxMessages")
        .help("Consume this many messages. If -1 (the default), the consumer will consume until the process is killed externally");
    parser.addArgument("--session-timeout")
        .action(store())
        .required(false)
        .setDefault(30000)
        .type(Integer.class)
        .metavar("TIMEOUT_MS")
        .dest("sessionTimeout")
        .help("Set the consumer's session timeout");
    parser.addArgument("--verbose")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("VERBOSE")
        .help("Enable to log individual consumed records");
    parser.addArgument("--enable-autocommit")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("ENABLE-AUTOCOMMIT")
        .dest("useAutoCommit")
        .help("Enable offset auto-commit on consumer");
    parser.addArgument("--reset-policy")
        .action(store())
        .required(false)
        .setDefault("earliest")
        .type(String.class)
        .dest("resetPolicy")
        .help("Set reset policy (must be either 'earliest', 'latest', or 'none')");
    parser.addArgument("--assignment-strategy")
        .action(store())
        .required(false)
        .setDefault(RangeAssignor.class.getName())
        .type(String.class)
        .dest("assignmentStrategy")
        .help("Set assignment strategy (e.g. " + RoundRobinAssignor.class.getName() + ")");
    parser.addArgument("--consumer.config")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("CONFIG_FILE")
        .help("Consumer config properties file (config options shared with command line parameters will be overridden).");
    return parser;
}
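Note the connection-group pattern: the group is required(true) while each member is required(false), so the parser demands one of --bootstrap-server or --broker-list without requiring either flag individually. How createFromArgs then resolves the broker address is not shown in this snippet; a hypothetical helper (the method name and precedence logic are assumptions, not Kafka's actual code) might look like this:

// Hypothetical helper: prefer --bootstrap-server and fall back to the
// deprecated --broker-list, matching the behavior described in the help text.
// Namespace is argparse4j's net.sourceforge.argparse4j.inf.Namespace.
static String brokerAddress(Namespace res) {
    String bootstrapServer = res.getString("bootstrapServer");
    return bootstrapServer != null ? bootstrapServer : res.getString("brokerList");
}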
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project Kafka by Apache.
From class VerifiableConsumer, method main:
public static void main(String[] args) {
    ArgumentParser parser = argParser();
    if (args.length == 0) {
        parser.printHelp();
        // Can't use `Exit.exit` here because it didn't exist until 0.11.0.0.
        System.exit(0);
    }
    try {
        final VerifiableConsumer consumer = createFromArgs(parser, args);
        // Can't use `Exit.addShutdownHook` here because it didn't exist until 2.5.0.
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::close, "verifiable-consumer-shutdown-hook"));
        consumer.run();
    } catch (ArgumentParserException e) {
        parser.handleError(e);
        // Can't use `Exit.exit` here because it didn't exist until 0.11.0.0.
        System.exit(1);
    }
}
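VerifiableConsumer's close() implementation is not shown in this snippet. The usual Kafka client idiom for letting a shutdown hook interrupt a consumer blocked in poll() is KafkaConsumer.wakeup(), which makes the blocked poll() throw WakeupException. A minimal sketch of that general pattern (this is the standard client idiom, not necessarily what VerifiableConsumer.close does internally):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class ShutdownSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "shutdown-sketch");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("test-topic"));
        // wakeup() is safe to call from another thread; it makes a blocked poll() throw WakeupException.
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::wakeup, "consumer-shutdown-hook"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                records.forEach(r -> System.out.println(r.topic() + "-" + r.partition() + "@" + r.offset()));
            }
        } catch (WakeupException e) {
            // Expected on shutdown; fall through to close().
        } finally {
            consumer.close();
        }
    }
}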
Use of net.sourceforge.argparse4j.inf.ArgumentParser in project Kafka by Apache.
From class VerifiableProducer, method main:
public static void main(String[] args) {
    ArgumentParser parser = argParser();
    if (args.length == 0) {
        parser.printHelp();
        // Can't use `Exit.exit` here because it didn't exist until 0.11.0.0.
        System.exit(0);
    }
    try {
        final VerifiableProducer producer = createFromArgs(parser, args);
        final long startMs = System.currentTimeMillis();
        ThroughputThrottler throttler = new ThroughputThrottler(producer.throughput, startMs);
        // Can't use `Exit.addShutdownHook` here because it didn't exist until 2.5.0.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            // Trigger main thread to stop producing messages
            producer.stopProducing = true;
            // Flush any remaining messages
            producer.close();
            // Print a summary
            long stopMs = System.currentTimeMillis();
            double avgThroughput = 1000 * ((producer.numAcked) / (double) (stopMs - startMs));
            producer.printJson(new ToolData(producer.numSent, producer.numAcked, producer.throughput, avgThroughput));
        }, "verifiable-producer-shutdown-hook"));
        producer.run(throttler);
    } catch (ArgumentParserException e) {
        parser.handleError(e);
        // Can't use `Exit.exit` here because it didn't exist until 0.11.0.0.
        System.exit(1);
    }
}
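The shutdown hook reports average throughput as acked records per elapsed second: for example, 50,000 acked records over 10,000 ms gives 1000 * (50000 / 10000.0) = 5,000 records/s. The ThroughputThrottler handed to run() is what caps the send rate; below is a sketch of the usual send loop around it, modeled on how ProducerPerformance uses the class (the loop body is an assumption about what run(throttler) does internally, not VerifiableProducer's actual code):

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.tools.ThroughputThrottler;

public class ThrottledSendSketch {
    // Send numRecords copies of record, staying near targetThroughput records/sec.
    static void sendThrottled(KafkaProducer<byte[], byte[]> producer,
                              ProducerRecord<byte[], byte[]> record,
                              long numRecords,
                              int targetThroughput) {
        long startMs = System.currentTimeMillis();
        ThroughputThrottler throttler = new ThroughputThrottler(targetThroughput, startMs);
        for (long sent = 0; sent < numRecords; sent++) {
            long sendStartMs = System.currentTimeMillis();
            producer.send(record);
            if (throttler.shouldThrottle(sent, sendStartMs)) {
                throttler.throttle(); // sleeps just long enough to stay under the target rate
            }
        }
    }
}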