Use of kafka.api.PartitionOffsetRequestInfo in project cdap by caskdata.
From the class KafkaConsumer, method fetchOffsetBefore.
/**
 * Fetch the offset before the given time.
 * @param timeMillis timestamp in milliseconds to fetch the offset before.
 * @return the Kafka message offset
*/
public long fetchOffsetBefore(long timeMillis) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(timeMillis, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  SimpleConsumer consumer = getConsumer();
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    // Retry once with a fresh consumer before giving up
    closeConsumer();
    consumer = getConsumer();
    response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      closeConsumer();
      throw new RuntimeException(String.format(
        "Error fetching offset data from broker %s:%d for topic %s, partition %d. Error code: %d",
        consumer.host(), consumer.port(), topic, partition, response.errorCode(topic, partition)));
    }
  }
  long[] offsets = response.offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  }
  // No offsets returned. If we were not already asking for the earliest time, retry with it;
  // otherwise give up and throw.
  if (timeMillis != kafka.api.OffsetRequest.EarliestTime()) {
    return fetchOffsetBefore(kafka.api.OffsetRequest.EarliestTime());
  }
  closeConsumer();
  throw new RuntimeException(String.format(
    "Got zero offsets in offset response for time %s from broker %s:%d for topic %s, partition %d",
    timeMillis, consumer.host(), consumer.port(), topic, partition));
}
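A minimal usage sketch, assuming kafkaConsumer is an instance of the cdap KafkaConsumer above, already bound to a topic and partition (the variable name and the wall-clock example are hypothetical):

// Hypothetical caller of fetchOffsetBefore; the special time constants come from kafka.api.OffsetRequest.
long latest = kafkaConsumer.fetchOffsetBefore(kafka.api.OffsetRequest.LatestTime());    // -1L: latest offset
long earliest = kafkaConsumer.fetchOffsetBefore(kafka.api.OffsetRequest.EarliestTime()); // -2L: earliest offset
long anHourAgo = kafkaConsumer.fetchOffsetBefore(System.currentTimeMillis() - 60 * 60 * 1000L); // offset before a concrete time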
Use of kafka.api.PartitionOffsetRequestInfo in project flink by apache.
From the class SimpleConsumerThread, method requestAndSetSpecificTimeOffsetsFromKafka.
// ------------------------------------------------------------------------
// Kafka Request Utils
// ------------------------------------------------------------------------
/**
* Request offsets before a specific time for a set of partitions, via a Kafka consumer.
*
 * @param consumer The consumer connected to the lead broker
 * @param partitions The list of partitions for which we need offsets
 * @param whichTime The type of time we are requesting; -1 and -2 are special constants (see kafka.api.OffsetRequest)
*/
private static void requestAndSetSpecificTimeOffsetsFromKafka(
    SimpleConsumer consumer,
    List<KafkaTopicPartitionState<TopicAndPartition>> partitions,
    long whichTime) throws IOException {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
    requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(whichTime, 1));
  }
  requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
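The two special time values mentioned in the Javadoc are constants on kafka.api.OffsetRequest; a quick reference sketch, independent of the Flink code above:

long latest = kafka.api.OffsetRequest.LatestTime();     // -1L: resolve to the latest (log head) offset
long earliest = kafka.api.OffsetRequest.EarliestTime(); // -2L: resolve to the earliest available offset

With the signature above, a caller inside SimpleConsumerThread can therefore pass either constant, or a concrete timestamp in milliseconds, as whichTime.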
Use of kafka.api.PartitionOffsetRequestInfo in project storm by apache.
From the class KafkaOffsetLagUtil, method getLogHeadOffsets.
private static Map<String, Map<Integer, Long>> getLogHeadOffsets(Map<String, List<TopicPartition>> leadersAndTopicPartitions) {
  Map<String, Map<Integer, Long>> result = new HashMap<>();
  if (leadersAndTopicPartitions != null) {
    PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
    SimpleConsumer simpleConsumer = null;
    for (Map.Entry<String, List<TopicPartition>> leader : leadersAndTopicPartitions.entrySet()) {
      try {
        simpleConsumer = new SimpleConsumer(leader.getKey().split(":")[0], Integer.parseInt(leader.getKey().split(":")[1]), 10000, 64 * 1024, "LogHeadOffsetRequest");
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        for (TopicPartition topicPartition : leader.getValue()) {
          requestInfo.put(new TopicAndPartition(topicPartition.topic(), topicPartition.partition()), partitionOffsetRequestInfo);
          if (!result.containsKey(topicPartition.topic())) {
            result.put(topicPartition.topic(), new HashMap<Integer, Long>());
          }
        }
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), "LogHeadOffsetRequest");
        OffsetResponse response = simpleConsumer.getOffsetsBefore(request);
        for (TopicPartition topicPartition : leader.getValue()) {
          result.get(topicPartition.topic()).put(topicPartition.partition(), response.offsets(topicPartition.topic(), topicPartition.partition())[0]);
        }
      } finally {
        if (simpleConsumer != null) {
          simpleConsumer.close();
        }
      }
    }
  }
  return result;
}
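A minimal sketch of the expected input shape, with hypothetical broker addresses and topic names; TopicPartition is assumed to be the (topic, partition) holder used by KafkaOffsetLagUtil, and the method itself is private to that class:

// Keys are broker addresses in "host:port" form, as the split(":") above requires.
Map<String, List<TopicPartition>> leaders = new HashMap<>();
leaders.put("broker-1:9092", Arrays.asList(new TopicPartition("events", 0), new TopicPartition("events", 1)));
leaders.put("broker-2:9092", Arrays.asList(new TopicPartition("events", 2)));
// Called from within KafkaOffsetLagUtil:
// Map<String, Map<Integer, Long>> logHeads = getLogHeadOffsets(leaders);
// logHeads.get("events").get(0) would then hold the latest (log head) offset of events-0.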
Use of kafka.api.PartitionOffsetRequestInfo in project jstorm by alibaba.
From the class KafkaConsumer, method getOffset.
public long getOffset(String topic, int partition, long startOffsetTime) {
  SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
  if (simpleConsumer == null) {
    LOG.error("Error consumer is null get offset from partition:" + partition);
    return -1;
  }
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
  long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  } else {
    return NO_OFFSET;
  }
}
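A brief calling sketch, with hypothetical topic and partition values; consumer stands for an instance of the jstorm KafkaConsumer above, and NO_OFFSET for the same sentinel constant the method returns:

long offset = consumer.getOffset("events", 0, kafka.api.OffsetRequest.LatestTime());
if (offset == -1) {
  // no leader consumer was found for the partition (see the null check above)
} else if (offset == NO_OFFSET) {
  // the broker returned no offsets for the requested time
} else {
  // offset is usable as the next fetch position
}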
Use of kafka.api.PartitionOffsetRequestInfo in project heron by twitter.
From the class KafkaUtils, method getOffset.
public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  } else {
    return NO_OFFSET;
  }
}
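A minimal caller sketch, using hypothetical broker host, topic, and client id; the SimpleConsumer constructor arguments mirror those in the storm example above:

SimpleConsumer consumer = new SimpleConsumer("broker-1", 9092, 10000, 64 * 1024, "offset-lookup");
long latest = KafkaUtils.getOffset(consumer, "events", 0, kafka.api.OffsetRequest.LatestTime());
long earliest = KafkaUtils.getOffset(consumer, "events", 0, kafka.api.OffsetRequest.EarliestTime());
consumer.close();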