Use of kafka.javaapi.OffsetResponse in project cdap by caskdata.
The class KafkaConsumer, method fetchOffsetBefore.
/**
 * Fetch the message offset recorded closest before the given time.
 *
 * @param timeMillis fetch the latest offset published before this time, in milliseconds
 * @return Kafka message offset
 */
public long fetchOffsetBefore(long timeMillis) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(timeMillis, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  SimpleConsumer consumer = getConsumer();
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    // Try once more with a freshly created consumer.
    closeConsumer();
    consumer = getConsumer();
    response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      closeConsumer();
      throw new RuntimeException(String.format(
          "Error fetching offset data from broker %s:%d for topic %s, partition %d. Error code: %d",
          consumer.host(), consumer.port(), topic, partition, response.errorCode(topic, partition)));
    }
  }
  long[] offsets = response.offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  }
  // No offset recorded before the requested time: fall back to the earliest available offset,
  // or give up if the earliest offset is what was already requested.
  if (timeMillis != kafka.api.OffsetRequest.EarliestTime()) {
    return fetchOffsetBefore(kafka.api.OffsetRequest.EarliestTime());
  }
  closeConsumer();
  throw new RuntimeException(String.format(
      "Got zero offsets in offset response for time %d from broker %s:%d for topic %s, partition %d",
      timeMillis, consumer.host(), consumer.port(), topic, partition));
}
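For readers who want to try the same pattern outside CDAP, here is a minimal standalone sketch of what fetchOffsetBefore wraps: ask the broker for the newest offset recorded before a timestamp and fall back to the earliest retained offset when nothing is returned for that time. The broker address, topic, and partition below are placeholders, not values taken from the CDAP code.

import java.util.HashMap;
import java.util.Map;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetBeforeTimeSketch {

  public static void main(String[] args) {
    // Placeholder connection settings; point these at a real broker to run the sketch.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "offset-sketch");
    try {
      long oneHourAgo = System.currentTimeMillis() - 60 * 60 * 1000L;
      System.out.println("offset before cutoff: " + fetchOffsetBefore(consumer, "my-topic", 0, oneHourAgo));
    } finally {
      consumer.close();
    }
  }

  static long fetchOffsetBefore(SimpleConsumer consumer, String topic, int partition, long timeMillis) {
    TopicAndPartition tp = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    // Ask for at most one offset recorded before timeMillis.
    requestInfo.put(tp, new PartitionOffsetRequestInfo(timeMillis, 1));
    kafka.javaapi.OffsetRequest request =
        new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      throw new RuntimeException("Offset request failed with error code " + response.errorCode(topic, partition));
    }
    long[] offsets = response.offsets(topic, partition);
    if (offsets.length > 0) {
      return offsets[0];
    }
    // Nothing recorded before that time: fall back to the earliest retained offset,
    // mirroring the fallback in fetchOffsetBefore above.
    if (timeMillis != kafka.api.OffsetRequest.EarliestTime()) {
      return fetchOffsetBefore(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime());
    }
    throw new RuntimeException("Broker returned no offsets for " + topic + "-" + partition);
  }
}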
Use of kafka.javaapi.OffsetResponse in project apex-malhar by apache.
The class KafkaMetadataUtil, method getLastOffset.
/**
 * Fetch the last offset before the given time for a topic partition.
 *
 * @param consumer the SimpleConsumer connected to the partition's lead broker
 * @param topic the topic name
 * @param partition the partition id
 * @param whichTime the target time; usually kafka.api.OffsetRequest.EarliestTime() or LatestTime()
 * @param clientName the client id to send with the request
 * @return the requested offset, or 0 if the consumer is null or the request returns an error
 */
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
  if (consumer == null) {
    return 0;
  }
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    logger.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
    return 0;
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
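A hedged usage sketch for getLastOffset: the whichTime argument is normally one of Kafka's sentinel values, kafka.api.OffsetRequest.EarliestTime() for the first retained offset or LatestTime() for the log-end offset. The broker address and topic are placeholders, and the import assumes the utility lives in com.datatorrent.contrib.kafka, which may differ between Malhar versions.

import com.datatorrent.contrib.kafka.KafkaMetadataUtil; // package assumed; adjust to your Malhar version
import kafka.javaapi.consumer.SimpleConsumer;

public class LastOffsetUsageSketch {
  public static void main(String[] args) {
    // Placeholder broker settings.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "last-offset-sketch");
    try {
      // First offset still retained by the broker for this partition.
      long earliest = KafkaMetadataUtil.getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.EarliestTime(), "last-offset-sketch");
      // Log-end offset, i.e. the offset the next produced message will receive.
      long latest = KafkaMetadataUtil.getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.LatestTime(), "last-offset-sketch");
      System.out.println("available offset range: [" + earliest + ", " + latest + ")");
    } finally {
      consumer.close();
    }
  }
}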
Use of kafka.javaapi.OffsetResponse in project storm by apache.
The class KafkaOffsetLagUtil, method getLogHeadOffsets.
private static Map<String, Map<Integer, Long>> getLogHeadOffsets(Map<String, List<TopicPartition>> leadersAndTopicPartitions) {
  Map<String, Map<Integer, Long>> result = new HashMap<>();
  if (leadersAndTopicPartitions != null) {
    PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
    SimpleConsumer simpleConsumer = null;
    for (Map.Entry<String, List<TopicPartition>> leader : leadersAndTopicPartitions.entrySet()) {
      try {
        simpleConsumer = new SimpleConsumer(leader.getKey().split(":")[0], Integer.parseInt(leader.getKey().split(":")[1]), 10000, 64 * 1024, "LogHeadOffsetRequest");
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        for (TopicPartition topicPartition : leader.getValue()) {
          requestInfo.put(new TopicAndPartition(topicPartition.topic(), topicPartition.partition()), partitionOffsetRequestInfo);
          if (!result.containsKey(topicPartition.topic())) {
            result.put(topicPartition.topic(), new HashMap<Integer, Long>());
          }
        }
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), "LogHeadOffsetRequest");
        OffsetResponse response = simpleConsumer.getOffsetsBefore(request);
        for (TopicPartition topicPartition : leader.getValue()) {
          result.get(topicPartition.topic()).put(topicPartition.partition(), response.offsets(topicPartition.topic(), topicPartition.partition())[0]);
        }
      } finally {
        if (simpleConsumer != null) {
          simpleConsumer.close();
        }
      }
    }
  }
  return result;
}
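getLogHeadOffsets is private, but its input shape is easy to illustrate: a map from a leader broker's host:port string to the topic partitions that broker leads. Below is a hedged sketch of assembling that structure, assuming the TopicPartition in the snippet is org.apache.kafka.common.TopicPartition (as the topic()/partition() accessors suggest); broker addresses and topics are placeholders, and in the real utility the leaders are discovered from topic metadata rather than hard-coded.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class LeaderMapSketch {
  public static void main(String[] args) {
    // Map each leader broker ("host:port") to the partitions it leads.
    Map<String, List<TopicPartition>> leadersAndTopicPartitions = new HashMap<>();

    List<TopicPartition> broker1Partitions = new ArrayList<>();
    broker1Partitions.add(new TopicPartition("my-topic", 0));
    broker1Partitions.add(new TopicPartition("my-topic", 2));
    leadersAndTopicPartitions.put("broker1.example.com:9092", broker1Partitions);

    List<TopicPartition> broker2Partitions = new ArrayList<>();
    broker2Partitions.add(new TopicPartition("my-topic", 1));
    leadersAndTopicPartitions.put("broker2.example.com:9092", broker2Partitions);

    // getLogHeadOffsets would return topic -> (partition -> log head offset) for this input.
    System.out.println(leadersAndTopicPartitions);
  }
}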
Use of kafka.javaapi.OffsetResponse in project flink by apache.
The class SimpleConsumerThread, method requestAndSetOffsetsFromKafka.
/**
 * Request offsets from Kafka for a given set of partitions' offset request information.
 * The returned offsets are used to set the internal partition states.
 *
 * <p>This method makes up to three attempts if the response has an error.
 *
 * @param consumer the consumer connected to the lead broker
 * @param partitionStates the partition states; their offsets will be set from the Kafka response
 * @param partitionToRequestInfo map of each partition to its offset request info
 */
private static void requestAndSetOffsetsFromKafka(
    SimpleConsumer consumer,
    List<KafkaTopicPartitionState<TopicAndPartition>> partitionStates,
    Map<TopicAndPartition, PartitionOffsetRequestInfo> partitionToRequestInfo) throws IOException {
  int retries = 0;
  OffsetResponse response;
  while (true) {
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(partitionToRequestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      StringBuilder exception = new StringBuilder();
      for (KafkaTopicPartitionState<TopicAndPartition> part : partitionStates) {
        short code;
        if ((code = response.errorCode(part.getTopic(), part.getPartition())) != ErrorMapping.NoError()) {
          exception.append("\nException for topic=").append(part.getTopic()).append(" partition=").append(part.getPartition()).append(": ").append(StringUtils.stringifyException(ErrorMapping.exceptionFor(code)));
        }
      }
      if (++retries >= 3) {
        throw new IOException("Unable to get last offset for partitions " + partitionStates + ": " + exception.toString());
      } else {
        LOG.warn("Unable to get last offset for partitions: Exception(s): {}", exception);
      }
    } else {
      // leave retry loop
      break;
    }
  }
  for (KafkaTopicPartitionState<TopicAndPartition> part : partitionStates) {
    // there will be offsets only for partitions that were requested for
    if (partitionToRequestInfo.containsKey(part.getKafkaPartitionHandle())) {
      final long offset = response.offsets(part.getTopic(), part.getPartition())[0];
      // the offset returned is that of the next record to fetch. because our state reflects the latest
      // successfully emitted record, we subtract one
      part.setOffset(offset - 1);
    }
  }
}
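The subtraction at the end relies on a Kafka convention worth spelling out: an offset request made with LatestTime() returns the log-end offset, i.e. the offset the next produced record will receive, not the offset of the last record already in the log. Here is a hedged standalone sketch of that convention; broker, topic, and partition are placeholders.

import java.util.HashMap;
import java.util.Map;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class LogEndOffsetSketch {
  public static void main(String[] args) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "log-end-sketch");
    try {
      Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
      requestInfo.put(new TopicAndPartition("my-topic", 0), new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
      kafka.javaapi.OffsetRequest request =
          new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
      OffsetResponse response = consumer.getOffsetsBefore(request);
      if (response.hasError()) {
        throw new RuntimeException("error code " + response.errorCode("my-topic", 0));
      }
      long logEndOffset = response.offsets("my-topic", 0)[0];
      // The next record to be produced gets logEndOffset; the newest record already in the log is one less.
      System.out.println("log-end offset " + logEndOffset + ", last existing record " + (logEndOffset - 1));
    } finally {
      consumer.close();
    }
  }
}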
Use of kafka.javaapi.OffsetResponse in project voltdb by VoltDB.
The class KafkaTopicPartitionImporter, method getTopicOffset.
private OffsetResponse getTopicOffset(PartitionOffsetRequestInfo pori) {
  final int partition = m_topicAndPartition.partition();
  final String topic = m_topicAndPartition.topic();
  kafka.javaapi.OffsetRequest earlyRq = new kafka.javaapi.OffsetRequest(singletonMap(m_topicAndPartition, pori), kafka.api.OffsetRequest.CurrentVersion(), KafkaStreamImporterConfig.CLIENT_ID);
  OffsetResponse response = null;
  Throwable fault = null;
  for (int attempts = 0; attempts < 3; ++attempts) {
    try {
      response = m_consumer.getOffsetsBefore(earlyRq);
      if (response.hasError()) {
        short code = response.errorCode(topic, partition);
        fault = ErrorMapping.exceptionFor(code);
        resetLeader();
      } else {
        return response;
      }
    } catch (Exception e) {
      if (e instanceof IOException) {
        resetLeader();
      }
      fault = e;
    }
  }
  if (fault != null) {
    rateLimitedLog(Level.ERROR, fault, "unable to fetch earliest offset for " + m_topicAndPartition);
    response = null;
  }
  return response;
}
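Both this importer and the Flink thread above turn the broker's numeric error code into a Java throwable via kafka.common.ErrorMapping. A hedged minimal sketch of that translation follows; the NotLeaderForPartition code is used here purely as an example value.

import kafka.common.ErrorMapping;

public class ErrorMappingSketch {
  public static void main(String[] args) {
    // Example error code; in real code this comes from OffsetResponse.errorCode(topic, partition).
    short code = ErrorMapping.NotLeaderForPartitionCode();
    if (code != ErrorMapping.NoError()) {
      Throwable fault = ErrorMapping.exceptionFor(code);
      // A caller would typically log this and refresh partition leadership before retrying,
      // as resetLeader() does in the importer above.
      System.out.println("broker reported: " + fault);
    }
  }
}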