Use of org.apache.kafka.clients.consumer.ConsumerRecord in project streamsx.kafka by IBMStreams: class TransactionalKafkaProducerClient, method getCommittedSequenceIdFromCtrlTopic. The method replays the control topic and returns the highest sequence ID committed under this producer's transactional ID.
@SuppressWarnings("rawtypes")
private long getCommittedSequenceIdFromCtrlTopic() throws Exception {
    KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(getConsumerProperties());
    // capture the current end offsets of the control topic partitions; they mark where to stop reading
    HashMap<TopicPartition, Long> endOffsets = getControlTopicEndOffsets(consumer);
    // assign the control topic partitions and move the consumer to the initial offsets to begin consuming from
    consumer.assign(controlTopicInitialOffsets.keySet());
    controlTopicInitialOffsets.forEach((tp, offset) -> {
        consumer.seek(tp, offset);
    });
    long committedSeqId = 0;
    boolean consumerAtEnd = false;
    while (!consumerAtEnd) {
        ConsumerRecords<?, ?> records = consumer.poll(1000);
        if (logger.isDebugEnabled())
            logger.debug("ConsumerRecords: " + records);
        Iterator<?> it = records.iterator();
        // Records from different partitions can be interleaved, so we cannot assume that the
        // last record returned by the iterator contains the last committed sequence ID.
        while (it.hasNext()) {
            ConsumerRecord record = (ConsumerRecord) it.next();
            Headers headers = record.headers();
            if (logger.isDebugEnabled())
                logger.debug("Headers: " + headers);
            // a control record belongs to this producer when its transaction-ID header matches
            String tid = new String(headers.lastHeader(TRANSACTION_ID).value(), StandardCharsets.UTF_8);
            if (logger.isDebugEnabled())
                logger.debug("Checking tid=" + tid + " (currentTid=" + getTransactionalId() + "); from " + record.topic() + "-" + record.partition());
            if (tid.equals(getTransactionalId())) {
                // keep the highest committed sequence ID seen so far
                long decodedSeqId = Long.valueOf(new String(headers.lastHeader(COMMITTED_SEQUENCE_ID).value(), StandardCharsets.UTF_8));
                if (decodedSeqId > committedSeqId)
                    committedSeqId = decodedSeqId;
            }
        }
        // stop once the consumer has read up to the end offsets captured at the start
        consumerAtEnd = isConsumerAtEnd(consumer, endOffsets);
        if (logger.isDebugEnabled())
            logger.debug("consumerAtEnd=" + consumerAtEnd);
    }
    consumer.close(1l, TimeUnit.SECONDS);
    return committedSeqId;
}
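The helpers getControlTopicEndOffsets and isConsumerAtEnd are not shown on this page. Below is a minimal sketch of how they could work, assuming the end offsets are taken with KafkaConsumer.endOffsets and compared against the consumer's positions; the actual streamsx.kafka implementations may differ.

// Hypothetical sketches, not the project's actual code.
private HashMap<TopicPartition, Long> getControlTopicEndOffsets(KafkaConsumer<?, ?> consumer) {
    // endOffsets(...) returns, per partition, the offset of the next record to be written
    return new HashMap<>(consumer.endOffsets(controlTopicInitialOffsets.keySet()));
}

private boolean isConsumerAtEnd(KafkaConsumer<?, ?> consumer, Map<TopicPartition, Long> endOffsets) {
    // the consumer is at the end once its position has reached the end offset of every partition
    for (Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
        if (consumer.position(entry.getKey()) < entry.getValue())
            return false;
    }
    return true;
}

Note that consumer.poll(long) and consumer.close(long, TimeUnit), as used above, are the pre-2.0 kafka-clients overloads; from Kafka 2.0 onward they are deprecated in favor of the Duration-based poll(Duration) and close(Duration).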