Use of kafka.api.PartitionOffsetRequestInfo in project pinot by linkedin.
The class SimpleConsumerWrapper, method fetchPartitionOffset:
/**
* Fetches the numeric Kafka offset for this partition for a symbolic name ("largest" or "smallest").
*
* @param requestedOffset Either "largest" or "smallest"
* @param timeoutMillis Timeout in milliseconds
* @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
* milliseconds
* @return The numeric offset corresponding to {@code requestedOffset}
*/
public synchronized long fetchPartitionOffset(String requestedOffset, int timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  Preconditions.checkNotNull(requestedOffset);

  final long offsetRequestTime;
  if (requestedOffset.equalsIgnoreCase("largest")) {
    offsetRequestTime = kafka.api.OffsetRequest.LatestTime();
  } else if (requestedOffset.equalsIgnoreCase("smallest")) {
    offsetRequestTime = kafka.api.OffsetRequest.EarliestTime();
  } else if (requestedOffset.equalsIgnoreCase("testDummy")) {
    return -1L;
  } else {
    throw new IllegalArgumentException("Unknown initial offset value " + requestedOffset);
  }

  int kafkaErrorCount = 0;
  final int MAX_KAFKA_ERROR_COUNT = 10;
  final long endTime = System.currentTimeMillis() + timeoutMillis;

  while (System.currentTimeMillis() < endTime) {
    // Try to get into a state where we're connected to Kafka
    while (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER
        && System.currentTimeMillis() < endTime) {
      _currentState.process();
    }
    if (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER
        && endTime <= System.currentTimeMillis()) {
      throw new TimeoutException();
    }

    // Send the offset request to Kafka
    OffsetRequest request = new OffsetRequest(
        Collections.singletonMap(new TopicAndPartition(_topic, _partition),
            new PartitionOffsetRequestInfo(offsetRequestTime, 1)),
        kafka.api.OffsetRequest.CurrentVersion(), _clientId);
    OffsetResponse offsetResponse;
    try {
      offsetResponse = _simpleConsumer.getOffsetsBefore(request);
    } catch (Exception e) {
      _currentState.handleConsumerException(e);
      continue;
    }

    final short errorCode = offsetResponse.errorCode(_topic, _partition);
    if (errorCode == Errors.NONE.code()) {
      long offset = offsetResponse.offsets(_topic, _partition)[0];
      if (offset == 0L) {
        LOGGER.warn("Fetched offset of 0 for topic {} and partition {}, is this a newly created topic?",
            _topic, _partition);
      }
      return offset;
    } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
      // If there is no leader, it'll take some time for a new leader to be elected; wait 100 ms before retrying
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } else {
      // Transient Kafka error: retry after a short delay, giving up after MAX_KAFKA_ERROR_COUNT failures
      kafkaErrorCount++;
      if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
        throw exceptionForKafkaErrorCode(errorCode);
      }
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }

  throw new TimeoutException();
}
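Stripped of Pinot's connection state machine and retry bookkeeping, the heart of this method is a single getOffsetsBefore round trip. A minimal sketch of that exchange follows, using kafka.javaapi.consumer.SimpleConsumer, kafka.javaapi.OffsetRequest, and kafka.javaapi.OffsetResponse directly; the broker host, port, topic name, client id, and socket settings are hypothetical, and error handling is reduced to a bare check.

// Minimal sketch of the round trip inside fetchPartitionOffset, without the
// state machine or retries. Host, port, topic, and client id are hypothetical.
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "offset-sketch");
try {
  TopicAndPartition tp = new TopicAndPartition("myTopic", 0);
  // "largest" maps to LatestTime(), "smallest" to EarliestTime()
  OffsetRequest request = new OffsetRequest(
      Collections.singletonMap(tp, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1)),
      kafka.api.OffsetRequest.CurrentVersion(), "offset-sketch");
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    throw new RuntimeException("Offset request failed, error code " + response.errorCode("myTopic", 0));
  }
  long largest = response.offsets("myTopic", 0)[0];
} finally {
  consumer.close();
}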
Use of kafka.api.PartitionOffsetRequestInfo in project storm by apache.
The class KafkaUtils, method getOffset:
public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
      new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(),
      consumer.clientId());
  long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  } else {
    return NO_OFFSET;
  }
}
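The startOffsetTime parameter carries either one of the two sentinel times or a real timestamp. A hypothetical call site, with illustrative broker coordinates and topic name, might look like the following; note that the legacy offsets API resolves plain timestamps only at log-segment granularity, so a timestamp lookup is approximate.

// Hypothetical consumer and topic; EarliestTime() is -2L and LatestTime() is -1L on the wire.
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "storm-offset-example");
long earliest = KafkaUtils.getOffset(consumer, "events", 0, kafka.api.OffsetRequest.EarliestTime());
long latest = KafkaUtils.getOffset(consumer, "events", 0, kafka.api.OffsetRequest.LatestTime());
// Any other value is treated as a timestamp in milliseconds and resolved per log segment.
long approximate = KafkaUtils.getOffset(consumer, "events", 0, System.currentTimeMillis() - 3600 * 1000L);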
Use of kafka.api.PartitionOffsetRequestInfo in project druid by druid-io.
The class KafkaSimpleConsumer, method getOffset:
private long getOffset(boolean earliest) throws InterruptedException {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
      new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
      earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
  OffsetResponse response = null;
  try {
    response = consumer.getOffsetsBefore(request);
  } catch (Exception e) {
    ensureNotInterrupted(e);
    log.error(e, "caught exception in getOffsetsBefore [%s] - [%s]", topic, partitionId);
    return -1;
  }
  if (response.hasError()) {
    log.error("error fetching data Offset from the Broker [%s]. reason: [%s]", leaderBroker.host(),
        response.errorCode(topic, partitionId));
    return -1;
  }
  long[] offsets = response.offsets(topic, partitionId);
  return earliest ? offsets[0] : offsets[offsets.length - 1];
}
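Since this method reports failure with the -1 sentinel instead of throwing, callers must handle that value explicitly. A hypothetical retry wrapper around the method above, with an assumed RETRY_INTERVAL_MILLIS constant, could look like this:

// Hypothetical caller: loop until a real offset (>= 0) comes back, staying interruptible.
private long getOffsetWithRetry(boolean earliest) throws InterruptedException {
  long offset = getOffset(earliest);
  while (offset < 0) {
    Thread.sleep(RETRY_INTERVAL_MILLIS); // assumed constant, e.g. 1000L
    offset = getOffset(earliest);
  }
  return offset;
}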
Use of kafka.api.PartitionOffsetRequestInfo in project flink by apache.
The class SimpleConsumerThread, method requestAndSetEarliestOrLatestOffsetsFromKafka:
/**
* For a set of partitions, if a partition is set with the special offsets {@link OffsetRequest#EarliestTime()}
* or {@link OffsetRequest#LatestTime()}, replace them with actual offsets requested via a Kafka consumer.
*
* @param consumer The consumer connected to the lead broker
* @param partitions The list of partitions we need offsets for
*/
private static void requestAndSetEarliestOrLatestOffsetsFromKafka(
    SimpleConsumer consumer,
    List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
    if (part.getOffset() == OffsetRequest.EarliestTime()
        || part.getOffset() == OffsetRequest.LatestTime()) {
      requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(part.getOffset(), 1));
    }
  }
  requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
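The delegate requestAndSetOffsetsFromKafka is not shown here. In outline, it issues the batched OffsetRequest and writes each returned offset back into the matching partition state. The following is only a sketch of that shape, not Flink's actual implementation (for instance, the real code adjusts the stored value to match its own offset convention):

// Sketch only, not Flink's code: send the batched request, then copy results back.
private static void requestAndSetOffsetsFromKafkaSketch(
    SimpleConsumer consumer,
    List<KafkaTopicPartitionState<TopicAndPartition>> partitions,
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo) {
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
      requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  kafka.javaapi.OffsetResponse response = consumer.getOffsetsBefore(request);
  for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
    TopicAndPartition handle = part.getKafkaPartitionHandle();
    if (requestInfo.containsKey(handle)) {
      // At most one offset comes back per partition because maxNumOffsets was 1
      long offset = response.offsets(handle.topic(), handle.partition())[0];
      part.setOffset(offset);
    }
  }
}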
Use of kafka.api.PartitionOffsetRequestInfo in project presto by prestodb.
The class KafkaSplitManager, method findAllOffsets:
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);

  // The API implies that this will always return all of the offsets, so it seems that a partition
  // cannot have more than Integer.MAX_VALUE - 1 segments.
  //
  // This also assumes that the lowest value returned will be the first segment available, so if
  // segments have been dropped off, this value should not be 0.
  PartitionOffsetRequestInfo partitionOffsetRequestInfo =
      new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
  OffsetRequest offsetRequest = new OffsetRequest(
      ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
      kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
  if (offsetResponse.hasError()) {
    short errorCode = offsetResponse.errorCode(topicName, partitionId);
    log.warn("Offset response has error: %d", errorCode);
    throw new PrestoException(KAFKA_SPLIT_ERROR,
        "could not fetch data from Kafka, error code is '" + errorCode + "'");
  }
  return offsetResponse.offsets(topicName, partitionId);
}
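The array comes back sorted in descending order (latest boundary first), with roughly one entry per log-segment boundary plus the log-end offset. A hypothetical consumer of this result that walks the boundaries into ascending [start, end) ranges, similar to how the split manager uses them, could be:

// Hypothetical post-processing; topic name and partition are illustrative.
long[] offsets = findAllOffsets(consumer, "myTopic", 0);
for (int i = offsets.length - 1; i > 0; i--) {
  long start = offsets[i]; // older boundary
  long end = offsets[i - 1]; // next newer boundary, exclusive
  System.out.printf("split range: [%d, %d)%n", start, end);
}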