Use of kafka.common.TopicAndPartition in the jstorm project by Alibaba.
From the class KafkaConsumer, method getOffset.
/**
 * Queries the partition leader for the most recent offset recorded before
 * {@code startOffsetTime}.
 *
 * @param topic           topic to query
 * @param partition       partition id within the topic
 * @param startOffsetTime offset-lookup timestamp (see kafka.api.OffsetRequest constants)
 * @return the resolved offset, {@code NO_OFFSET} when the broker returns no
 *         offsets, or {@code -1} when no leader consumer is available
 */
public long getOffset(String topic, int partition, long startOffsetTime) {
// Without a connection to the partition leader we cannot issue the request.
SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
if (simpleConsumer == null) {
LOG.error("Error consumer is null get offset from partition:" + partition);
return -1;
}
// Build a single-entry offset request: at most one offset before startOffsetTime.
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(startOffsetTime, 1));
OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
// An empty array means the broker had no offset matching the query.
return (offsets.length > 0) ? offsets[0] : NO_OFFSET;
}
Use of kafka.common.TopicAndPartition in the heron project by Twitter.
From the class KafkaUtils, method getOffset.
/**
 * Asks the given broker connection for the latest offset recorded before
 * {@code startOffsetTime} on one topic partition.
 *
 * @param consumer        an already-connected SimpleConsumer for the partition leader
 * @param topic           topic to query
 * @param partition       partition id within the topic
 * @param startOffsetTime offset-lookup timestamp (see kafka.api.OffsetRequest constants)
 * @return the resolved offset, or {@code NO_OFFSET} when the broker returns none
 */
public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
// Single-entry request map: ask for at most one offset before startOffsetTime.
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(startOffsetTime, 1));
OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
// Empty response -> the broker has no offset for this query.
if (offsets.length == 0) {
return NO_OFFSET;
}
return offsets[0];
}
Use of kafka.common.TopicAndPartition in the graylog2-server project by Graylog2.
From the class KafkaJournal, method flushDirtyLogs.
/**
 * A Java transliteration of what the scala implementation does, which unfortunately is declared as private.
 * <p>
 * Walks every log the log manager knows about and flushes those whose
 * configured flush interval has elapsed since their last flush.
 */
protected void flushDirtyLogs() {
LOG.debug("Checking for dirty logs to flush...");
// Bridge the Scala map of partition -> log into a Java view we can iterate.
for (final Map.Entry<TopicAndPartition, Log> entry : JavaConversions.mapAsJavaMap(logManager.logsByTopicPartition()).entrySet()) {
final TopicAndPartition tp = entry.getKey();
final Log partitionLog = entry.getValue();
final long elapsedSinceFlush = JODA_TIME.milliseconds() - partitionLog.lastFlushTime();
try {
LOG.debug("Checking if flush is needed on {} flush interval {} last flushed {} time since last flush: {}", tp.topic(), partitionLog.config().flushInterval(), partitionLog.lastFlushTime(), elapsedSinceFlush);
// Flush only when the configured interval (in ms) has fully elapsed.
if (elapsedSinceFlush >= partitionLog.config().flushMs()) {
partitionLog.flush();
}
} catch (Exception e) {
// A failure on one log must not stop the sweep over the remaining logs.
LOG.error("Error flushing topic " + tp.topic(), e);
}
}
}
Aggregations