Use of org.apache.kafka.clients.consumer.KafkaConsumer in project kafka by apache.
Class StreamsResetter, method resetInputAndInternalAndSeekToEndIntermediateTopicOffsets:
private void resetInputAndInternalAndSeekToEndIntermediateTopicOffsets() {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    if (inputTopics.size() == 0 && intermediateTopics.size() == 0) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return;
    } else {
        if (inputTopics.size() != 0) {
            System.out.println("Resetting offsets to zero for input topics " + inputTopics + " and all internal topics.");
        }
        if (intermediateTopics.size() != 0) {
            System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
        }
    }
    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, options.valueOf(bootstrapServerOption));
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, options.valueOf(applicationIdOption));
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            System.err.println("Input topic " + topic + " not found. Skipping.");
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            System.err.println("Intermediate topic " + topic + " not found. Skipping.");
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : allTopics) {
        if (isInternalTopic(topic)) {
            topicsToSubscribe.add(topic);
        }
    }
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        client.subscribe(topicsToSubscribe);
        client.poll(1);
        final Set<TopicPartition> partitions = client.assignment();
        final Set<TopicPartition> inputAndInternalTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic) || isInternalTopic(topic)) {
                inputAndInternalTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }
        if (inputAndInternalTopicPartitions.size() > 0) {
            client.seekToBeginning(inputAndInternalTopicPartitions);
        }
        if (intermediateTopicPartitions.size() > 0) {
            client.seekToEnd(intermediateTopicPartitions);
        }
        for (final TopicPartition p : partitions) {
            client.position(p);
        }
        client.commitSync();
    } catch (final RuntimeException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
}
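The reset works in three steps: a single poll() is issued only so the consumer joins the group and receives an assignment; seekToBeginning()/seekToEnd() are lazy, so position() is called on every partition to force each seek to resolve; commitSync() then commits those resolved positions as the group's new offsets. Below is a minimal, self-contained sketch of the same pattern for a single input topic. The broker address, group id, and topic name are placeholder values, not anything taken from the tool above.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetResetSketch {
    public static void main(String[] args) {
        Properties config = new Properties();
        // "localhost:9092", "my-app" and "input-topic" are placeholders for illustration only.
        config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-app");
        config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        try (KafkaConsumer<byte[], byte[]> client =
                 new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            client.subscribe(Collections.singletonList("input-topic"));
            client.poll(1);                                // join the group and receive a partition assignment
            client.seekToBeginning(client.assignment());   // lazy: takes effect on the next position()/poll()
            for (TopicPartition p : client.assignment()) {
                client.position(p);                        // force the seek to resolve to a concrete offset
            }
            client.commitSync();                           // commit offset 0 for every assigned partition
        }
    }
}

Disabling auto-commit matters here: the only offsets that reach the broker are the ones committed explicitly after the seeks have resolved.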
Use of org.apache.kafka.clients.consumer.KafkaConsumer in project kafka by apache.
Class VerifiableConsumer, method createFromArgs:
public static VerifiableConsumer createFromArgs(ArgumentParser parser, String[] args) throws ArgumentParserException {
    Namespace res = parser.parseArgs(args);
    String topic = res.getString("topic");
    boolean useAutoCommit = res.getBoolean("useAutoCommit");
    int maxMessages = res.getInt("maxMessages");
    boolean verbose = res.getBoolean("verbose");
    String configFile = res.getString("consumer.config");
    Properties consumerProps = new Properties();
    if (configFile != null) {
        try {
            consumerProps.putAll(Utils.loadProps(configFile));
        } catch (IOException e) {
            throw new ArgumentParserException(e.getMessage(), parser);
        }
    }
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, res.getString("groupId"));
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, res.getString("brokerList"));
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, useAutoCommit);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, res.getString("resetPolicy"));
    consumerProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(res.getInt("sessionTimeout")));
    consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, res.getString("assignmentStrategy"));
    StringDeserializer deserializer = new StringDeserializer();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer);
    return new VerifiableConsumer(consumer, System.out, topic, maxMessages, useAutoCommit, false, verbose);
}
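Two details of this construction are easy to miss: settings from the optional consumer config file are loaded first, so the command-line values layered on afterwards win on conflict, and the deserializers are handed to the KafkaConsumer constructor directly, so key.deserializer/value.deserializer never need to appear in the Properties. The following is a hedged sketch of the same construction pattern without the argparse plumbing; the broker address, group id, and reset policy are made-up example values:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerFromProps {
    // Sketch only: all concrete values below are illustrative, not from the tool above.
    static KafkaConsumer<String, String> build() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "verifiable-consumer-demo");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Deserializers passed to the constructor take precedence over any deserializer keys in props.
        StringDeserializer deserializer = new StringDeserializer();
        return new KafkaConsumer<>(props, deserializer, deserializer);
    }
}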
Use of org.apache.kafka.clients.consumer.KafkaConsumer in project samza by apache.
Class TestStreamProcessor, method verifyNumMessages:
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
private void verifyNumMessages(String topic, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
                count++;
            }
        } else {
            emptyPollCount++;
        }
    }
    Assert.assertEquals(count, expectedNumMessages);
}
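The loop stops either when the expected count is reached or after five consecutive empty polls, so a stalled producer turns into a bounded wait rather than a hang. The sketch below extracts that poll-until-idle pattern into a standalone helper; the helper name is hypothetical, and unlike the test above it resets the empty-poll counter whenever data arrives, which is a design choice rather than a faithful copy:

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

final class PollUntilIdle {
    // Hypothetical helper: returns how many records arrived before the topic went quiet.
    static int drain(KafkaConsumer<byte[], byte[]> consumer, String topic, int expected) {
        consumer.subscribe(Collections.singletonList(topic));
        int count = 0;
        int emptyPolls = 0;
        while (count < expected && emptyPolls < 5) {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(5000);
            if (records.isEmpty()) {
                emptyPolls++;          // a stalled producer ends the loop after five quiet polls
            } else {
                emptyPolls = 0;        // design choice: the test above never resets this counter
                count += records.count();
            }
        }
        return count;
    }
}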
Use of org.apache.kafka.clients.consumer.KafkaConsumer in project samza by apache.
Class TestZkStreamProcessorBase, method verifyNumMessages:
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
protected void verifyNumMessages(String topic, final Map<Integer, Boolean> expectedValues, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    Map<Integer, Boolean> map = new HashMap<>(expectedValues);
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                String val = new String((byte[]) record.value());
                LOG.info("Got value " + val + "; count = " + count + "; out of " + expectedNumMessages);
                Integer valI = Integer.valueOf(val);
                if (valI < BAD_MESSAGE_KEY) {
                    map.put(valI, true);
                    count++;
                }
            }
        } else {
            emptyPollCount++;
            LOG.warn("empty polls " + emptyPollCount);
        }
    }
    // filter out numbers we did not get
    long numFalse = map.values().stream().filter(v -> !v).count();
    Assert.assertEquals("didn't get this number of events ", 0, numFalse);
    Assert.assertEquals(expectedNumMessages, count);
}
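Unlike the previous test, this one cannot rely on ordering: a rebalance may redeliver messages, so each expected payload is pre-registered in a map and merely flipped to true when it arrives, which makes duplicates harmless. The following is a standalone illustration of that bookkeeping, not code from the Samza test; DeliveryTracker and its methods are hypothetical names:

import java.util.HashMap;
import java.util.Map;

final class DeliveryTracker {
    private final Map<Integer, Boolean> seen = new HashMap<>();

    DeliveryTracker(int expectedNumMessages) {
        for (int i = 0; i < expectedNumMessages; i++) {
            seen.put(i, false);   // each expected payload starts out "not yet delivered"
        }
    }

    void record(String payload) {
        seen.put(Integer.valueOf(payload), true);   // duplicates simply re-mark the same key
    }

    boolean allDelivered() {
        return seen.values().stream().allMatch(v -> v);
    }
}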
Use of org.apache.kafka.clients.consumer.KafkaConsumer in project kafka by apache.
Class ClientCompatibilityTest, method testConsume:
public void testConsume(final long prodTimeMs) throws Exception {
    Properties consumerProps = new Properties();
    consumerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, testConfig.bootstrapServer);
    consumerProps.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 512);
    ClientCompatibilityTestDeserializer deserializer = new ClientCompatibilityTestDeserializer(testConfig.expectClusterId);
    final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer);
    final List<PartitionInfo> partitionInfos = consumer.partitionsFor(testConfig.topic);
    if (partitionInfos.size() < 1)
        throw new RuntimeException("Expected at least one partition for topic " + testConfig.topic);
    final Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
    final LinkedList<TopicPartition> topicPartitions = new LinkedList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
        timestampsToSearch.put(topicPartition, prodTimeMs);
        topicPartitions.add(topicPartition);
    }
    final OffsetsForTime offsetsForTime = new OffsetsForTime();
    tryFeature("offsetsForTimes", testConfig.offsetsForTimesSupported, new Runnable() {
        @Override
        public void run() {
            offsetsForTime.result = consumer.offsetsForTimes(timestampsToSearch);
        }
    }, new Runnable() {
        @Override
        public void run() {
            log.info("offsetsForTime = {}", offsetsForTime.result);
        }
    });
    // Whether or not offsetsForTimes works, beginningOffsets and endOffsets
    // should work.
    consumer.beginningOffsets(timestampsToSearch.keySet());
    consumer.endOffsets(timestampsToSearch.keySet());
    consumer.assign(topicPartitions);
    consumer.seekToBeginning(topicPartitions);
    final Iterator<byte[]> iter = new Iterator<byte[]>() {
        private static final int TIMEOUT_MS = 10000;
        private Iterator<ConsumerRecord<byte[], byte[]>> recordIter = null;
        private byte[] next = null;

        private byte[] fetchNext() {
            while (true) {
                long curTime = Time.SYSTEM.milliseconds();
                if (curTime - prodTimeMs > TIMEOUT_MS)
                    throw new RuntimeException("Timed out after " + TIMEOUT_MS + " ms.");
                if (recordIter == null) {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
                    recordIter = records.iterator();
                }
                if (recordIter.hasNext())
                    return recordIter.next().value();
                recordIter = null;
            }
        }

        @Override
        public boolean hasNext() {
            if (next != null)
                return true;
            next = fetchNext();
            return next != null;
        }

        @Override
        public byte[] next() {
            if (!hasNext())
                throw new NoSuchElementException();
            byte[] cur = next;
            next = null;
            return cur;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
    byte[] next = iter.next();
    try {
        compareArrays(message1, next);
        log.debug("Found first message...");
    } catch (RuntimeException e) {
        throw new RuntimeException("The first message in this topic was not ours. Please use a new topic when " + "running this program.");
    }
    try {
        next = iter.next();
        if (testConfig.expectRecordTooLargeException)
            throw new RuntimeException("Expected to get a RecordTooLargeException when reading a record " + "bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG);
        try {
            compareArrays(message2, next);
        } catch (RuntimeException e) {
            System.out.println("The second message in this topic was not ours. Please use a new " + "topic when running this program.");
            Exit.exit(1);
        }
    } catch (RecordTooLargeException e) {
        log.debug("Got RecordTooLargeException", e);
        if (!testConfig.expectRecordTooLargeException)
            throw new RuntimeException("Got an unexpected RecordTooLargeException when reading a record " + "bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG);
    }
    log.debug("Closing consumer.");
    consumer.close();
    log.info("Closed consumer.");
}
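The test treats offsetsForTimes() as an optional feature, since older brokers reject it, while beginningOffsets() and endOffsets() are expected to work everywhere. Below is a minimal sketch of the three offset lookups in isolation; the class name, method name, and topic name are placeholders:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

final class OffsetLookupSketch {
    // "my-topic" and the timestamp argument are illustrative values.
    static void lookup(KafkaConsumer<byte[], byte[]> consumer, long timestampMs) {
        Map<TopicPartition, Long> timestamps = new HashMap<>();
        for (PartitionInfo info : consumer.partitionsFor("my-topic")) {
            timestamps.put(new TopicPartition(info.topic(), info.partition()), timestampMs);
        }
        // May fail against brokers that predate timestamp-based offset lookup.
        Map<TopicPartition, OffsetAndTimestamp> byTime = consumer.offsetsForTimes(timestamps);
        // These two lookups work regardless of whether offsetsForTimes is supported.
        Map<TopicPartition, Long> earliest = consumer.beginningOffsets(timestamps.keySet());
        Map<TopicPartition, Long> latest = consumer.endOffsets(timestamps.keySet());
        System.out.println("byTime=" + byTime + " earliest=" + earliest + " latest=" + latest);
    }
}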