Use of kafka.cluster.Broker in project druid by druid-io.
The class KafkaSimpleConsumer, method fetch().
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
  FetchResponse response = null;
  Broker previousLeader = leaderBroker;
  while (true) {
    ensureConsumer(previousLeader);
    FetchRequest request = new FetchRequestBuilder()
        .clientId(clientId)
        .addFetch(topic, partitionId, offset, FETCH_SIZE)
        .maxWait(timeoutMs)
        .minBytes(1)
        .build();
    log.debug("fetch offset %s", offset);
    try {
      response = consumer.fetch(request);
    } catch (Exception e) {
      ensureNotInterrupted(e);
      log.warn(e, "caught exception in fetch %s - %s", topic, partitionId);
      response = null;
    }
    if (response == null || response.hasError()) {
      short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
      log.warn("fetch %s - %s with offset %s encounters error: [%s]", topic, partitionId, offset, errorCode);
      boolean needNewLeader = false;
      if (errorCode == ErrorMapping.RequestTimedOutCode()) {
        log.info("kafka request timed out, response[%s]", response);
      } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        long newOffset = getOffset(earliest);
        log.info("got [%s] offset[%s] for [%s][%s]", earliest ? "earliest" : "latest", newOffset, topic, partitionId);
        if (newOffset < 0) {
          needNewLeader = true;
        } else {
          offset = newOffset;
          continue;
        }
      } else {
        needNewLeader = true;
      }
      if (needNewLeader) {
        stopConsumer();
        previousLeader = leaderBroker;
        leaderBroker = null;
        continue;
      }
    } else {
      break;
    }
  }
  return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) : EMPTY_MSGS;
}
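The method keeps retrying until it either gets a clean FetchResponse or fails over to a new leader, and it rewinds the offset when Kafka reports OffsetOutOfRange. A minimal sketch of how a caller might drive this loop follows; the message() and offset() accessors on BytesMessageWithOffset and the process() handler are assumptions for illustration, not taken from the druid source.

// Hypothetical polling loop around KafkaSimpleConsumer.fetch(long, int).
// message(), offset() and process() are assumed names, not druid's API.
void pollForever(KafkaSimpleConsumer consumer, long startOffset) throws InterruptedException {
  long nextOffset = startOffset;
  while (!Thread.currentThread().isInterrupted()) {
    Iterable<BytesMessageWithOffset> batch = consumer.fetch(nextOffset, 10_000);
    for (BytesMessageWithOffset msg : batch) {
      process(msg.message());          // assumed accessor returning the raw payload
      nextOffset = msg.offset() + 1;   // resume after the last consumed record
    }
  }
}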
Use of kafka.cluster.Broker in project voltdb by VoltDB.
The class KafkaStreamImporterConfig, method getConfigsForPartitions().
private static Map<URI, KafkaStreamImporterConfig> getConfigsForPartitions(String key, List<HostAndPort> brokerList, final String topic, String groupId, String procedure, int soTimeout, int fetchSize, String commitPolicy, FormatterBuilder formatterBuilder) {
  SimpleConsumer consumer = null;
  Map<URI, KafkaStreamImporterConfig> configs = new HashMap<>();
  List<FailedMetaDataAttempt> attempts = new ArrayList<>();
  Iterator<HostAndPort> hpitr = brokerList.iterator();
  while (configs.isEmpty() && hpitr.hasNext()) {
    HostAndPort hp = hpitr.next();
    try {
      consumer = new SimpleConsumer(hp.getHost(), hp.getPort(), soTimeout, fetchSize, CLIENT_ID);
      TopicMetadataRequest req = new TopicMetadataRequest(singletonList(topic));
      kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
      List<TopicMetadata> metaData = resp.topicsMetadata();
      if (metaData == null) {
        attempts.add(new FailedMetaDataAttempt("Failed to get topic metadata for topic " + topic + " from host " + hp.getHost(), null));
        closeConsumer(consumer);
        consumer = null;
        continue;
      }
      int partitionCount = 0;
      for (TopicMetadata item : metaData) {
        for (PartitionMetadata part : item.partitionsMetadata()) {
          ++partitionCount;
          URI uri;
          try {
            uri = new URI("kafka", key, topic + "/partition/" + part.partitionId());
          } catch (URISyntaxException ex) {
            // Should not happen
            throw new KafkaConfigurationException("unable to create topic resource URI", ex);
          }
          Broker leader = part.leader();
          if (leader == null) {
            attempts.add(new FailedMetaDataAttempt("Failed to get leader broker for topic " + topic + " partition " + part.partitionId() + " from host " + hp.getHost(), null));
            continue;
          }
          KafkaStreamImporterConfig config = new KafkaStreamImporterConfig(uri, brokerList, topic, part.partitionId(), new HostAndPort(leader.host(), leader.port()), groupId, fetchSize, soTimeout, procedure, commitPolicy, formatterBuilder);
          configs.put(uri, config);
        }
      }
      if (configs.size() != partitionCount) {
        configs.clear();
        closeConsumer(consumer);
        consumer = null;
      }
    } catch (Exception e) {
      attempts.add(new FailedMetaDataAttempt("Failed to send topic metadata request for topic " + topic + " from host " + hp.getHost(), e));
    } finally {
      closeConsumer(consumer);
    }
  }
  if (!attempts.isEmpty()) {
    attempts.forEach((attempt) -> {
      attempt.log();
    });
    attempts.clear();
    if (configs.isEmpty()) {
      throw new KafkaConfigurationException("Failed to get topic metadata for %s", topic);
    }
  }
  return configs;
}
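Each partition ends up keyed by a URI built with the three-argument URI(scheme, schemeSpecificPart, fragment) constructor, so a topic with N partitions yields N distinct map keys. A small standalone sketch of what those keys look like is shown below; the "importer1" key and "orders" topic are made-up example values.

import java.net.URI;

public class PartitionUriDemo {
  public static void main(String[] args) throws Exception {
    // Same constructor as above: URI(scheme, scheme-specific part, fragment).
    for (int partition = 0; partition < 3; partition++) {
      URI uri = new URI("kafka", "importer1", "orders" + "/partition/" + partition);
      System.out.println(uri);   // prints e.g. kafka:importer1#orders/partition/0
    }
  }
}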
Use of kafka.cluster.Broker in project druid by druid-io.
The class KafkaSimpleConsumer, method findNewLeader().
private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
  long retryCnt = 0;
  while (true) {
    PartitionMetadata metadata = findLeader();
    if (metadata != null) {
      replicaBrokers.clear();
      for (Broker replica : metadata.replicas()) {
        replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
      }
      log.debug("Got new Kafka leader metadata : [%s], previous leader : [%s]", metadata, oldLeader);
      Broker newLeader = metadata.leader();
      if (newLeader != null) {
        // just in case ZooKeeper doesn't get updated fast enough
        if (oldLeader == null || isValidNewLeader(newLeader) || retryCnt != 0) {
          return newLeader;
        }
      }
    }
    Thread.sleep(RETRY_INTERVAL);
    retryCnt++;
    // after repeated failures, widen the search to allBrokers
    if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
      log.warn("cannot find leader for [%s] - [%s] after [%s] retries", topic, partitionId, retryCnt);
      replicaBrokers.clear();
      replicaBrokers.addAll(allBrokers);
    }
  }
}
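findLeader() itself is not shown here, but the voltdb and presto snippets on this page use the same legacy SimpleConsumer metadata API, so a leader lookup generally follows the pattern below. This is a generic sketch under that assumption, not druid's actual findLeader() implementation; the seedBrokers/port parameters and the "leaderLookup" client id are illustrative.

// Generic leader lookup with the legacy SimpleConsumer API (sketch only).
private PartitionMetadata lookupLeader(List<String> seedBrokers, int port, String topic, int partitionId) {
  for (String seedBroker : seedBrokers) {
    SimpleConsumer consumer = null;
    try {
      consumer = new SimpleConsumer(seedBroker, port, 10_000, 64 * 1024, "leaderLookup");
      TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
      kafka.javaapi.TopicMetadataResponse response = consumer.send(request);
      for (TopicMetadata topicMetadata : response.topicsMetadata()) {
        for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
          if (partitionMetadata.partitionId() == partitionId) {
            return partitionMetadata;   // leader() may still be null during an election
          }
        }
      }
    } catch (Exception e) {
      // try the next seed broker
    } finally {
      if (consumer != null) {
        consumer.close();
      }
    }
  }
  return null;
}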
Use of kafka.cluster.Broker in project presto by prestodb.
The class KafkaSplitManager, method getSplits().
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout) {
  KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
  SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));
  TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
  TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
  ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
  for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
    for (PartitionMetadata part : metadata.partitionsMetadata()) {
      log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
      Broker leader = part.leader();
      if (leader == null) {
        // Leader election going on...
        log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
        continue;
      }
      HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
      SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
      // Kafka contains a reverse list of "end - start" pairs for the splits
      long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
      for (int i = offsets.length - 1; i > 0; i--) {
        KafkaSplit split = new KafkaSplit(connectorId, metadata.topic(), kafkaTableHandle.getKeyDataFormat(), kafkaTableHandle.getMessageDataFormat(), part.partitionId(), offsets[i], offsets[i - 1], partitionLeader);
        splits.add(split);
      }
    }
  }
  return new FixedSplitSource(splits.build());
}
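findAllOffsets is not shown above, but the comment before it indicates it returns segment boundary offsets in descending order, which the loop then pairs into (start, end) ranges per split. With the legacy SimpleConsumer API such a list is typically obtained from an OffsetRequest; the sketch below shows that pattern and is not necessarily Presto's actual implementation.

// Sketch: fetch all segment boundary offsets for one partition via OffsetRequest.
private static long[] fetchAllOffsets(SimpleConsumer consumer, String topic, int partitionId) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
  // LatestTime() with a large maxNumOffsets returns every segment boundary,
  // newest first, ending with the earliest offset still available.
  PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
      ImmutableMap.of(topicAndPartition, requestInfo),
      kafka.api.OffsetRequest.CurrentVersion(),
      consumer.clientId());
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    throw new RuntimeException("could not fetch offsets for " + topic + "/" + partitionId + ", error code " + response.errorCode(topic, partitionId));
  }
  return response.offsets(topic, partitionId);
}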