Use of org.graylog.shaded.kafka09.consumer.KafkaStream in project graylog2-server by Graylog2.
From the class KafkaTransport, method doLaunchLegacy:
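The method is shown without its import block, so here is a minimal sketch of the imports it appears to need. Only org.graylog.shaded.kafka09.consumer.KafkaStream is named on this page; the remaining shaded paths are assumptions based on a one-to-one relocation of the Kafka 0.9 consumer packages (kafka.consumer.*, kafka.javaapi.consumer.*, kafka.message.*) under the org.graylog.shaded.kafka09 prefix, and the Graylog plugin classes are the usual locations of MessageInput and RawMessage.

// Hedged sketch of the imports assumed by the snippet below.
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

import com.google.common.util.concurrent.Uninterruptibles;

import org.graylog.shaded.kafka09.consumer.Consumer;                 // assumed relocation of kafka.consumer.Consumer
import org.graylog.shaded.kafka09.consumer.ConsumerConfig;           // assumed
import org.graylog.shaded.kafka09.consumer.ConsumerIterator;         // assumed
import org.graylog.shaded.kafka09.consumer.ConsumerTimeoutException; // assumed
import org.graylog.shaded.kafka09.consumer.KafkaStream;              // as named on this page
import org.graylog.shaded.kafka09.consumer.TopicFilter;              // assumed
import org.graylog.shaded.kafka09.consumer.Whitelist;                // assumed
import org.graylog.shaded.kafka09.message.MessageAndMetadata;        // assumed

import org.graylog2.plugin.inputs.MessageInput;
import org.graylog2.plugin.journal.RawMessage;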
private void doLaunchLegacy(final MessageInput input) {
    final Properties props = new Properties();

    props.put("group.id", configuration.getString(CK_GROUP_ID, DEFAULT_GROUP_ID));
    props.put("client.id", "gl2-" + nodeId.getShortNodeId() + "-" + input.getId());

    props.put("fetch.min.bytes", String.valueOf(configuration.getInt(CK_FETCH_MIN_BYTES)));
    props.put("fetch.wait.max.ms", String.valueOf(configuration.getInt(CK_FETCH_WAIT_MAX)));
    props.put("zookeeper.connect", configuration.getString(CK_ZOOKEEPER));
    props.put("auto.offset.reset", configuration.getString(CK_OFFSET_RESET, DEFAULT_OFFSET_RESET));
    // Default auto commit interval is 60 seconds. Reduce to 1 second to minimize message duplication
    // if something breaks.
    props.put("auto.commit.interval.ms", "1000");
    // Set a consumer timeout to avoid blocking on the consumer iterator.
    props.put("consumer.timeout.ms", "1000");

    insertCustomProperties(props);

    final int numThreads = configuration.getInt(CK_THREADS);
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    cc = Consumer.createJavaConsumerConnector(consumerConfig);

    final TopicFilter filter = new Whitelist(configuration.getString(CK_TOPIC_FILTER));

    final List<KafkaStream<byte[], byte[]>> streams = cc.createMessageStreamsByFilter(filter, numThreads);

    // this is being used during shutdown to first stop all submitted jobs before committing the offsets back to zookeeper
    // and then shutting down the connection.
    // this is to avoid yanking away the connection from the consumer runnables
    stopLatch = new CountDownLatch(streams.size());

    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new Runnable() {
            @Override
            public void run() {
                final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
                boolean retry;

                do {
                    retry = false;
                    try {
                        // noinspection WhileLoopReplaceableByForEach
                        while (consumerIterator.hasNext()) {
                            if (paused) {
                                // we try not to spin here, so we wait until the lifecycle goes back to running.
                                LOG.debug("Message processing is paused, blocking until message processing is turned back on.");
                                Uninterruptibles.awaitUninterruptibly(pausedLatch);
                            }
                            // check for being stopped before actually getting the message, otherwise we could end up losing that message
                            if (stopped) {
                                break;
                            }
                            if (isThrottled()) {
                                blockUntilUnthrottled();
                            }

                            // process the message, this will immediately mark the message as having been processed. this gets tricky
                            // if we get an exception about processing it down below.
                            final MessageAndMetadata<byte[], byte[]> message = consumerIterator.next();
                            final byte[] bytes = message.message();

                            // it is possible that the message is null
                            if (bytes == null) {
                                continue;
                            }

                            totalBytesRead.addAndGet(bytes.length);
                            lastSecBytesReadTmp.addAndGet(bytes.length);

                            final RawMessage rawMessage = new RawMessage(bytes);
                            input.processRawMessage(rawMessage);
                        }
                    } catch (ConsumerTimeoutException e) {
                        // Happens when there is nothing to consume, retry to check again.
                        retry = true;
                    } catch (Exception e) {
                        LOG.error("Kafka consumer error, stopping consumer thread.", e);
                    }
                } while (retry && !stopped);

                // explicitly commit our offsets when stopping.
                // this might trigger a couple of times, but it won't hurt
                cc.commitOffsets();
                stopLatch.countDown();
            }
        });
    }
}
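The consumer runnables coordinate with state that is managed outside this method: the stopped and paused flags, pausedLatch, stopLatch, and the connector cc. That stop path is not shown on this page; the sketch below only illustrates the ordering the comments describe (wake any paused threads, let each runnable commit offsets and count stopLatch down, then shut the connector down). The method name stopConsumers and the 5-second wait are hypothetical, not Graylog's actual doStop implementation.

// Hypothetical sketch only; Graylog's real shutdown lives elsewhere in KafkaTransport.
private void stopConsumers() {
    stopped = true;                  // consumer loops check this flag and exit their iterator loop
    if (pausedLatch != null) {
        pausedLatch.countDown();     // release threads blocked in Uninterruptibles.awaitUninterruptibly()
    }
    try {
        // let every runnable commit its offsets and count the latch down before closing the
        // connector, so the connection is not yanked away from the consumer threads mid-commit
        stopLatch.await(5, java.util.concurrent.TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (cc != null) {
        cc.shutdown();               // closes the legacy consumer connector and its ZooKeeper connection
        cc = null;
    }
}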