Use of org.apache.twill.kafka.client.BrokerInfo in project cdap by caskdata.
The class KafkaConsumer, method getConsumer.
private SimpleConsumer getConsumer() {
  if (consumer != null) {
    return consumer;
  }
  // Look up the current leader broker of the topic partition and connect a SimpleConsumer to it.
  BrokerInfo leader = brokerService.getLeader(topic, partition);
  consumer = new SimpleConsumer(leader.getHost(), leader.getPort(), TIMEOUT_MS, BUFFER_SIZE_BYTES, clientName);
  return consumer;
}
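The consumer is created lazily against whichever broker is currently the leader of the partition; if leadership later moves, the cached SimpleConsumer points at the wrong broker. A minimal, hypothetical companion sketch (releaseConsumer is not part of the shown class) of how a caller might drop the stale consumer so the next getConsumer() call re-resolves the leader:

private void releaseConsumer() {
  // Hypothetical helper: close the cached consumer after a fetch error so that the next
  // getConsumer() call asks BrokerService for the current leader again.
  if (consumer != null) {
    consumer.close();
    consumer = null;
  }
}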
Use of org.apache.twill.kafka.client.BrokerInfo in project cdap by caskdata.
The class KafkaLogProcessorPipeline, method getKafkaConsumer.
/**
 * Returns a {@link KafkaSimpleConsumer} for the given partition, or {@code null} if the partition
 * currently has no leader.
 */
@Nullable
private KafkaSimpleConsumer getKafkaConsumer(String topic, int partition) {
  BrokerInfo leader = brokerService.getLeader(topic, partition);
  if (leader == null) {
    return null;
  }
  // Reuse the consumer already connected to this leader, if any.
  KafkaSimpleConsumer consumer = kafkaConsumers.get(leader);
  if (consumer != null) {
    return consumer;
  }
  consumer = new KafkaSimpleConsumer(leader, KAFKA_SO_TIMEOUT, config.getKafkaFetchBufferSize(),
                                     "client-" + name + "-" + partition);
  kafkaConsumers.put(leader, consumer);
  return consumer;
}
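Consumers are cached per leader BrokerInfo, so one pipeline can fetch from several partitions whose leaders live on different brokers without reconnecting for every call. A hedged sketch of the matching eviction path, assuming KafkaSimpleConsumer extends kafka.javaapi.consumer.SimpleConsumer (the closeConsumer name is hypothetical, not shown in this listing):

private void closeConsumer(BrokerInfo leader) {
  // Hypothetical cleanup: remove and close the consumer for a broker whose connection failed
  // or that is no longer the leader, so getKafkaConsumer() will reconnect on the next call.
  KafkaSimpleConsumer consumer = kafkaConsumers.remove(leader);
  if (consumer != null) {
    consumer.close();
  }
}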
Use of org.apache.twill.kafka.client.BrokerInfo in project cdap by caskdata.
The class KafkaOffsetResolver, method getStartOffset.
/**
 * Checks whether the message fetched at offset {@code checkpoint.getNextOffset() - 1} carries the
 * same timestamp as the given checkpoint. If it does, returns {@code checkpoint.getNextOffset()} directly.
 * If it does not, searches for the smallest offset of a message whose log event time equals
 * {@code checkpoint.getNextEventTime()}.
 *
 * @param checkpoint a {@link Checkpoint} containing the next offset of a message and its log event timestamp.
 *                   {@link Checkpoint#getNextOffset()}, {@link Checkpoint#getNextEventTime()}
 *                   and {@link Checkpoint#getMaxEventTime()} must all return a non-negative long
 * @param partition the partition in the topic in which to search for the matching offset
 * @return the next offset of the message with the smallest offset whose log event time equals
 *         {@code checkpoint.getNextEventTime()}, or {@code -1} if no such offset can be found or
 *         {@code checkpoint.getNextOffset()} is negative
 *
 * @throws LeaderNotAvailableException if there is no Kafka broker to talk to
 * @throws OffsetOutOfRangeException if the given offset is out of range
 * @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
 *                                        for the given topic and partition
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with an error
 */
long getStartOffset(final Checkpoint checkpoint, final int partition) {
  // This should never happen
  Preconditions.checkArgument(checkpoint.getNextOffset() > 0, "Invalid checkpoint offset");

  // Get the BrokerInfo of the partition leader for constructing a SimpleConsumer
  String topic = config.getTopic();
  BrokerInfo brokerInfo = brokerService.getLeader(topic, partition);
  if (brokerInfo == null) {
    throw new LeaderNotAvailableException(
      String.format("BrokerInfo from BrokerService is null for topic %s partition %d. Will retry in next run.",
                    topic, partition));
  }
  SimpleConsumer consumer = new SimpleConsumer(brokerInfo.getHost(), brokerInfo.getPort(), SO_TIMEOUT_MILLIS,
                                               BUFFER_SIZE, "offset-finder-" + topic + "-" + partition);

  // Fetch the message at offset checkpoint.getNextOffset() - 1 and check whether its timestamp
  // matches the timestamp recorded in the checkpoint
  long offset = checkpoint.getNextOffset() - 1;
  try {
    long timestamp = getEventTimeByOffset(consumer, partition, offset);
    if (timestamp == checkpoint.getNextEventTime()) {
      return checkpoint.getNextOffset();
    }
    // This can happen in a replicated cluster
    LOG.debug("Event timestamp in {}:{} at offset {} is {}. It doesn't match the checkpoint timestamp {}",
              topic, partition, offset, timestamp, checkpoint.getNextEventTime());
  } catch (NotFoundException | OffsetOutOfRangeException e) {
    // This means we can't find the timestamp. This can happen in a replicated cluster
    LOG.debug("Cannot get valid log event in {}:{} at offset {}", topic, partition, offset);
  }

  // Find the smallest offset whose event timestamp matches the checkpoint timestamp
  long nextOffset = findStartOffset(consumer, partition, checkpoint.getNextEventTime());
  LOG.debug("Found new nextOffset {} for topic {} partition {} with existing checkpoint {}.",
            nextOffset, topic, partition, checkpoint);
  return nextOffset;
}
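The helper getEventTimeByOffset used above is not part of this listing. A rough sketch of what it could look like with the same old kafka.javaapi.consumer.SimpleConsumer API is shown below; the serializer field and its decodeEventTimestamp method are assumptions about how the log event timestamp is decoded from the message payload, not something this listing confirms:

private long getEventTimeByOffset(SimpleConsumer consumer, int partition, long offset) throws NotFoundException {
  String topic = config.getTopic();
  // Fetch a single batch starting at the requested offset.
  FetchRequest request = new FetchRequestBuilder()
    .clientId(consumer.clientId())
    .addFetch(topic, partition, offset, BUFFER_SIZE)
    .build();
  FetchResponse response = consumer.fetch(request);
  if (response.hasError()) {
    // Translate the Kafka error code (e.g. an offset-out-of-range error) into an exception.
    throw Errors.forCode(response.errorCode(topic, partition)).exception();
  }
  for (MessageAndOffset messageAndOffset : response.messageSet(topic, partition)) {
    if (messageAndOffset.offset() == offset) {
      // Assumed: decode the log event timestamp from the message payload.
      return serializer.decodeEventTimestamp(messageAndOffset.message().payload());
    }
  }
  throw new NotFoundException("No message found at offset " + offset + " in " + topic + ":" + partition);
}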