Use of kafka.javaapi.PartitionMetadata in project apex-malhar (by Apache): the replay method of class AbstractKafkaInputOperator.
/**
 * Re-emits the tuples that were originally emitted in the given committed window,
 * using the (offset, count) pairs previously persisted through {@code windowDataManager}.
 * For each recovered partition it locates the partition leader, fetches messages
 * starting at the saved offset, and emits exactly the saved number of tuples.
 * After the largest completed window has been replayed, the consumer is restarted
 * at the offsets reached during replay.
 *
 * @param windowId id of the window to replay
 * @throws RuntimeException wrapping any {@link IOException} from the recovery store
 */
protected void replay(long windowId)
{
  try {
    @SuppressWarnings("unchecked")
    Map<KafkaPartition, MutablePair<Long, Integer>> recoveredData =
        (Map<KafkaPartition, MutablePair<Long, Integer>>)windowDataManager.retrieve(windowId);
    if (recoveredData != null) {
      Map<String, List<PartitionMetadata>> pms =
          KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().topic);
      if (pms != null) {
        SimpleKafkaConsumer cons = (SimpleKafkaConsumer)getConsumer();
        for (Map.Entry<KafkaPartition, MutablePair<Long, Integer>> rc : recoveredData.entrySet()) {
          KafkaPartition kp = rc.getKey();
          List<PartitionMetadata> pmsVal = pms.get(kp.getClusterId());
          // Guard against missing/empty cluster metadata; the original code would
          // throw NPE / NoSuchElementException here.
          if (pmsVal == null || pmsVal.isEmpty()) {
            continue;
          }
          // Find the metadata entry for this partition; skip if the partition is unknown.
          PartitionMetadata pm = null;
          for (PartitionMetadata candidate : pmsVal) {
            if (candidate.partitionId() == kp.getPartitionId()) {
              pm = candidate;
              break;
            }
          }
          if (pm == null) {
            continue;
          }
          Broker bk = pm.leader();
          // Fix: build a fresh request per partition. The original reused one
          // FetchRequestBuilder across iterations, so every later request
          // re-included all previously added fetches.
          FetchRequest req = new FetchRequestBuilder()
              .clientId(cons.getClientId())
              .addFetch(consumer.topic, kp.getPartitionId(), rc.getValue().left, cons.getBufferSize())
              .build();
          SimpleConsumer ksc = new SimpleConsumer(bk.host(), bk.port(), cons.getTimeout(),
              cons.getBufferSize(), cons.getClientId());
          try {
            FetchResponse fetchResponse = ksc.fetch(req);
            Integer count = 0;
            for (MessageAndOffset msg : fetchResponse.messageSet(consumer.topic, kp.getPartitionId())) {
              KafkaConsumer.KafkaMessage kafkaMessage =
                  new KafkaConsumer.KafkaMessage(kp, msg.message(), msg.offset());
              emitTuple(kafkaMessage);
              offsetStats.put(kp, msg.offset());
              count = count + 1;
              // Stop once the number of tuples recorded for this window has been re-emitted.
              if (count.equals(rc.getValue().right)) {
                break;
              }
            }
          } finally {
            // Fix: the original leaked this per-broker connection.
            ksc.close();
          }
        }
      }
    }
    if (windowId == windowDataManager.getLargestCompletedWindow()) {
      // Start the consumer at the largest recovery window.
      SimpleKafkaConsumer cons = (SimpleKafkaConsumer)getConsumer();
      // Set the offset positions to the consumer, advancing past the last replayed message.
      Map<KafkaPartition, Long> currentOffsets =
          new HashMap<KafkaPartition, Long>(cons.getCurrentOffsets());
      for (Map.Entry<KafkaPartition, Long> e : offsetStats.entrySet()) {
        currentOffsets.put(e.getKey(), e.getValue() + 1);
      }
      cons.resetOffset(currentOffsets);
      cons.start();
    }
  } catch (IOException e) {
    throw new RuntimeException("replay", e);
  }
}
Use of kafka.javaapi.PartitionMetadata in project jstorm (by Alibaba): the findLeader method of class KafkaConsumer.
/**
 * Locates the leader broker for the given partition by asking each configured
 * broker (starting at {@code brokerIndex}) for topic metadata, cycling through
 * the broker list at most once.
 *
 * <p>Fixes over the original: the loop is bounded by an attempt counter — the
 * original guard {@code i < size} could never fail because {@code i} was
 * advanced with {@code (i + 1) % size}, so an unknown partition caused an
 * infinite loop; the error log now prints the correct next broker index; the
 * redundant pre-loop consumer creation (identical to the first in-loop
 * creation) was removed.
 *
 * @param partition partition id to look up
 * @return metadata of the partition whose leader was found, or {@code null}
 *         if no broker reported it
 */
protected PartitionMetadata findLeader(int partition)
{
  int size = brokerList.size();
  int errors = 0;
  int attempts = 0;
  int i = brokerIndex;
  while (attempts < size && errors < size + 1) {
    Host host = brokerList.get(i);
    i = (i + 1) % size;
    // Remember where the next lookup should start.
    brokerIndex = i;
    attempts++;
    try {
      if (consumer == null) {
        consumer = new SimpleConsumer(host.getHost(), host.getPort(),
            config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
      }
      List<String> topics = Collections.singletonList(config.topic);
      TopicMetadataRequest req = new TopicMetadataRequest(topics);
      kafka.javaapi.TopicMetadataResponse resp;
      try {
        resp = consumer.send(req);
      } catch (Exception e) {
        errors += 1;
        // Fix: i is already the next index; the original logged (i + 1) % size.
        LOG.error("findLeader error, broker:" + host.toString()
            + ", will change to next broker index:" + i);
        if (consumer != null) {
          consumer.close();
          consumer = null;
        }
        continue;
      }
      for (TopicMetadata item : resp.topicsMetadata()) {
        for (PartitionMetadata part : item.partitionsMetadata()) {
          if (part.partitionId() == partition) {
            // finally still runs, closing the consumer before returning.
            return part;
          }
        }
      }
    } catch (Exception e) {
      LOG.error("Error communicating with Broker:" + host.toString()
          + ", find Leader for partition:" + partition);
    } finally {
      if (consumer != null) {
        consumer.close();
        consumer = null;
      }
    }
  }
  return null;
}
Aggregations