Use of kafka.javaapi.PartitionMetadata in project druid by druid-io.
The class KafkaSimpleConsumer, method findNewLeader.
private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
    long retryCnt = 0;
    while (true) {
        PartitionMetadata metadata = findLeader();
        if (metadata != null) {
            replicaBrokers.clear();
            for (Broker replica : metadata.replicas()) {
                replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
            }
            log.debug("Got new Kafka leader metadata : [%s], previous leader : [%s]", metadata, oldLeader);
            Broker newLeader = metadata.leader();
            if (newLeader != null) {
                // Accept the leader immediately if it differs from the old one; otherwise
                // require at least one retry, in case ZooKeeper hasn't been updated yet.
                if (oldLeader == null || isValidNewLeader(newLeader) || retryCnt != 0) {
                    return newLeader;
                }
            }
        }
        Thread.sleep(RETRY_INTERVAL);
        retryCnt++;
        // If no leader turns up among the current replicaBrokers, periodically
        // widen the search and try to find one via allBrokers.
        if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
            log.warn("cannot find leader for [%s] - [%s] after [%s] retries", topic, partitionId, retryCnt);
            replicaBrokers.clear();
            replicaBrokers.addAll(allBrokers);
        }
    }
}
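The helpers findLeader() and isValidNewLeader() are not part of this excerpt. As a rough sketch, isValidNewLeader() presumably just checks that the candidate differs from the broker currently in use; the leaderBroker field name below is an assumption, not verified druid code:

// Hypothetical sketch: a candidate is a valid new leader only if it differs
// from the broker we were previously connected to (field name assumed).
private boolean isValidNewLeader(Broker broker) {
    return !(leaderBroker.host().equalsIgnoreCase(broker.host())
            && leaderBroker.port() == broker.port());
}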
Use of kafka.javaapi.PartitionMetadata in project presto by prestodb.
The class KafkaSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout) {
    KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
    SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));
    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
        for (PartitionMetadata part : metadata.partitionsMetadata()) {
            log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
            Broker leader = part.leader();
            if (leader == null) {
                // Leader election going on...
                log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
                continue;
            }
            HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
            SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
            // Kafka contains a reverse list of "end - start" pairs for the splits
            long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
            for (int i = offsets.length - 1; i > 0; i--) {
                KafkaSplit split = new KafkaSplit(connectorId, metadata.topic(), kafkaTableHandle.getKeyDataFormat(), kafkaTableHandle.getMessageDataFormat(), part.partitionId(), offsets[i], offsets[i - 1], partitionLeader);
                splits.add(split);
            }
        }
    }
    return new FixedSplitSource(splits.build());
}
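The findAllOffsets() helper is called above but not shown. A minimal sketch of how it can be implemented with the SimpleConsumer offset API, requesting every segment boundary offset at once (error handling omitted; the actual Presto implementation may differ):

// Ask the partition leader for all segment boundary offsets. Kafka returns
// them newest-first, which is why getSplits() walks the array backwards.
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
    PartitionOffsetRequestInfo requestInfo =
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    kafka.javaapi.OffsetRequest offsetRequest = new kafka.javaapi.OffsetRequest(
            ImmutableMap.of(topicAndPartition, requestInfo),
            kafka.api.OffsetRequest.CurrentVersion(),
            consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    return offsetResponse.offsets(topicName, partitionId);
}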
Use of kafka.javaapi.PartitionMetadata in project voltdb by VoltDB.
The class KafkaTopicPartitionImporter, method findNewLeader.
// Find the leader for this topic partition.
private HostAndPort findNewLeader() {
    for (int i = 0; i < 3; i++) {
        boolean shouldSleep = false;
        PartitionMetadata metadata = findLeader();
        if (metadata == null) {
            shouldSleep = true;
        } else if (metadata.leader() == null) {
            shouldSleep = true;
        } else if (m_config.getPartitionLeader().getHost().equalsIgnoreCase(metadata.leader().host()) && i == 0) {
            // First time through, if the leader hasn't changed, give ZooKeeper a second to recover;
            // the second time, assume the broker recovered before failover or it was a non-broker issue.
            shouldSleep = true;
        } else {
            return new HostAndPort(metadata.leader().host(), metadata.leader().port());
        }
        if (shouldSleep) {
            backoffSleep(i + 1);
        }
    }
    // Unable to find a new leader; return null so the caller can recheck.
    rateLimitedLog(Level.WARN, null, "Failed to find new leader for " + m_topicAndPartition);
    return null;
}
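backoffSleep() is not shown in this excerpt. A minimal sketch under the assumption of a simple linear backoff (the interval and strategy here are illustrative, not VoltDB's verified implementation):

// Hypothetical sketch: sleep a little longer on each failed attempt and
// restore the interrupt flag if interrupted (the interval is an assumption).
private void backoffSleep(int attempt) {
    try {
        Thread.sleep(1000L * attempt);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}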
Use of kafka.javaapi.PartitionMetadata in project apex-malhar by apache.
The class AbstractExactlyOnceKafkaOutputOperator, method initializeLastProcessingOffset.
private void initializeLastProcessingOffset() {
    // Read the last Kafka message received for each partition of the topic.
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String) getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }
    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);
    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        // Fetch starting one message before the latest offset, i.e. the last message written.
        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
        FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(req);
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
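Once lastMsgs is populated, the operator can compare an outgoing tuple against the last message recovered for its partition to avoid writing it twice. A hypothetical sketch of such a check; the method name and the getFirst()/getSecond() accessors on Pair are assumptions, not apex-malhar's verified API:

// Hypothetical: true if this key/value pair matches the last message
// recovered for the partition, i.e. it was already written before restart.
private boolean isDuplicate(int partitionId, byte[] key, byte[] value) {
    Pair<byte[], byte[]> last = lastMsgs.get(partitionId);
    return last != null
            && Arrays.equals(last.getFirst(), key)
            && Arrays.equals(last.getSecond(), value);
}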
Use of kafka.javaapi.PartitionMetadata in project opennms by OpenNMS.
The class KafkaOffsetProvider, method getLastOffset.
public long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime) {
    long lastOffset = 0;
    try {
        List<String> topics = Collections.singletonList(topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse topicMetadataResponse = consumer.send(req);
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        // Re-point the consumer at the partition leader before asking for offsets.
        for (TopicMetadata topicMetadata : topicMetadataResponse.topicsMetadata()) {
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition) {
                    String partitionHost = partitionMetadata.leader().host();
                    consumer = getConsumer(partitionHost, partitionMetadata.leader().port());
                    break;
                }
            }
        }
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), KafkaOffsetConstants.CLIENT_NAME);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            LOGGER.error("Error fetching Offset Data from the Broker. Reason: {}", response.errorCode(topic, partition));
        } else {
            // Only read the offsets when the response carries no error; on error the
            // array may be empty, so fall through and return 0.
            long[] offsets = response.offsets(topic, partition);
            lastOffset = offsets[0];
        }
    } catch (Exception e) {
        LOGGER.error("Error while collecting the log size for topic: {}:{} ", topic, partition, e);
    }
    return lastOffset;
}
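A short usage example of getLastOffset(), fetching both ends of a partition's offset range (host, port, topic name, and timeouts are illustrative values only):

// Illustrative usage: latest and earliest offsets of partition 0 of "events".
SimpleConsumer consumer = new SimpleConsumer("kafka-broker", 9092, 100000, 64 * 1024, "offset-client");
long latest = getLastOffset(consumer, "events", 0, kafka.api.OffsetRequest.LatestTime());
long earliest = getLastOffset(consumer, "events", 0, kafka.api.OffsetRequest.EarliestTime());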