
Example 11 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project apex-malhar by apache.

From the class AbstractKafkaInputOperator, the method replay:

protected void replay(long windowId) {
    try {
        @SuppressWarnings("unchecked") Map<KafkaPartition, MutablePair<Long, Integer>> recoveredData = (Map<KafkaPartition, MutablePair<Long, Integer>>) windowDataManager.retrieve(windowId);
        if (recoveredData != null) {
            Map<String, List<PartitionMetadata>> pms = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().topic);
            if (pms != null) {
                SimpleKafkaConsumer cons = (SimpleKafkaConsumer) getConsumer();
                // Add all partition requests together in one fetch request
                FetchRequestBuilder frb = new FetchRequestBuilder().clientId(cons.getClientId());
                for (Map.Entry<KafkaPartition, MutablePair<Long, Integer>> rc : recoveredData.entrySet()) {
                    KafkaPartition kp = rc.getKey();
                    List<PartitionMetadata> pmsVal = pms.get(kp.getClusterId());
                    Iterator<PartitionMetadata> pmIterator = pmsVal.iterator();
                    PartitionMetadata pm = pmIterator.next();
                    while (pm.partitionId() != kp.getPartitionId()) {
                        if (!pmIterator.hasNext()) {
                            break;
                        }
                        pm = pmIterator.next();
                    }
                    if (pm.partitionId() != kp.getPartitionId()) {
                        continue;
                    }
                    Broker bk = pm.leader();
                    frb.addFetch(consumer.topic, rc.getKey().getPartitionId(), rc.getValue().left, cons.getBufferSize());
                    FetchRequest req = frb.build();
                    SimpleConsumer ksc = new SimpleConsumer(bk.host(), bk.port(), cons.getTimeout(), cons.getBufferSize(), cons.getClientId());
                    FetchResponse fetchResponse = ksc.fetch(req);
                    Integer count = 0;
                    for (MessageAndOffset msg : fetchResponse.messageSet(consumer.topic, kp.getPartitionId())) {
                        KafkaConsumer.KafkaMessage kafkaMessage = new KafkaConsumer.KafkaMessage(kp, msg.message(), msg.offset());
                        emitTuple(kafkaMessage);
                        offsetStats.put(kp, msg.offset());
                        count = count + 1;
                        if (count.equals(rc.getValue().right)) {
                            break;
                        }
                    }
                }
            }
        }
        if (windowId == windowDataManager.getLargestCompletedWindow()) {
            // Start the consumer at the largest recovery window
            SimpleKafkaConsumer cons = (SimpleKafkaConsumer) getConsumer();
            // Set the offset positions to the consumer
            Map<KafkaPartition, Long> currentOffsets = new HashMap<KafkaPartition, Long>(cons.getCurrentOffsets());
            // Increment the offsets
            for (Map.Entry<KafkaPartition, Long> e : offsetStats.entrySet()) {
                currentOffsets.put(e.getKey(), e.getValue() + 1);
            }
            cons.resetOffset(currentOffsets);
            cons.start();
        }
    } catch (IOException e) {
        throw new RuntimeException("replay", e);
    }
}
Also used : HashMap(java.util.HashMap) MessageAndOffset(kafka.message.MessageAndOffset) MutablePair(org.apache.commons.lang3.tuple.MutablePair) FetchRequestBuilder(kafka.api.FetchRequestBuilder) FetchRequest(kafka.api.FetchRequest) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) Broker(kafka.cluster.Broker) FetchResponse(kafka.javaapi.FetchResponse) IOException(java.io.IOException) PartitionMetadata(kafka.javaapi.PartitionMetadata) HashMap(java.util.HashMap) Map(java.util.Map) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer)
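
For reference, the heart of the replay above is the legacy Kafka 0.8 SimpleConsumer fetch round trip: build a FetchRequest for a partition at a recovered offset, send it to the partition leader, and iterate the returned message set. The following is a minimal standalone sketch of that pattern; the broker address (localhost:9092), topic name, offset, and client id are placeholder assumptions, not values taken from apex-malhar.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class ReplayFetchSketch {
    public static void main(String[] args) {
        // Placeholder connection settings; adjust to your cluster.
        String topic = "test";
        int partition = 0;
        long startOffset = 0L;
        int fetchSize = 65536;

        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 65536, "replay-sketch");
        try {
            // Build a fetch request for one partition starting at a known offset,
            // mirroring what the replay method does per recovered partition.
            FetchRequest req = new FetchRequestBuilder()
                .clientId("replay-sketch")
                .addFetch(topic, partition, startOffset, fetchSize)
                .build();
            FetchResponse response = consumer.fetch(req);
            // Iterate the returned message set and print offset plus payload.
            for (MessageAndOffset msg : response.messageSet(topic, partition)) {
                ByteBuffer payload = msg.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println(msg.offset() + ": " + new String(bytes, StandardCharsets.UTF_8));
            }
        } finally {
            consumer.close();
        }
    }
}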

Example 12 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project jstorm by alibaba.

From the class KafkaConsumer, the method findLeader:

protected PartitionMetadata findLeader(int partition) {
    PartitionMetadata returnMetaData = null;
    int errors = 0;
    int size = brokerList.size();
    Host brokerHost = brokerList.get(brokerIndex);
    try {
        if (consumer == null) {
            consumer = new SimpleConsumer(brokerHost.getHost(), brokerHost.getPort(), config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
        }
    } catch (Exception e) {
        LOG.warn(e.getMessage(), e);
        consumer = null;
    }
    int i = brokerIndex;
    loop: while (i < size && errors < size + 1) {
        Host host = brokerList.get(i);
        i = (i + 1) % size;
        // next index
        brokerIndex = i;
        try {
            if (consumer == null) {
                consumer = new SimpleConsumer(host.getHost(), host.getPort(), config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
            }
            List<String> topics = Collections.singletonList(config.topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = null;
            try {
                resp = consumer.send(req);
            } catch (Exception e) {
                errors += 1;
                LOG.error("findLeader error, broker:" + host.toString() + ", will change to next broker index:" + (i + 1) % size);
                if (consumer != null) {
                    consumer.close();
                    consumer = null;
                }
                continue;
            }
            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    if (part.partitionId() == partition) {
                        returnMetaData = part;
                        break loop;
                    }
                }
            }
        } catch (Exception e) {
            LOG.error("Error communicating with Broker:" + host.toString() + ", find Leader for partition:" + partition);
        } finally {
            if (consumer != null) {
                consumer.close();
                consumer = null;
            }
        }
    }
    return returnMetaData;
}
Also used : TopicMetadataRequest(kafka.javaapi.TopicMetadataRequest) PartitionMetadata(kafka.javaapi.PartitionMetadata) List(java.util.List) LinkedList(java.util.LinkedList) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer) KafkaException(kafka.common.KafkaException) IOException(java.io.IOException) UnresolvedAddressException(java.nio.channels.UnresolvedAddressException) SocketTimeoutException(java.net.SocketTimeoutException) ConnectException(java.net.ConnectException) TopicMetadata(kafka.javaapi.TopicMetadata)
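
Once findLeader returns a PartitionMetadata, callers typically read the leader broker's host and port from it to open a SimpleConsumer for fetching, and keep the replica list as the seed for the next leader lookup. Below is a minimal sketch of that consumption side, assuming the metadata was obtained as above; the class and method names here are illustrative, not part of jstorm.

import kafka.cluster.Broker;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.consumer.SimpleConsumer;

public class LeaderConsumerSketch {

    // Illustrative helper (not from the jstorm code): open a consumer against
    // the partition's current leader as reported by findLeader.
    public static SimpleConsumer consumerForLeader(PartitionMetadata metadata, String clientId) {
        if (metadata == null || metadata.leader() == null) {
            // Leader election may be in progress; callers typically retry after a backoff.
            throw new IllegalStateException("No leader available");
        }
        Broker leader = metadata.leader();
        // The replica brokers can be kept as fallbacks for the next leader lookup.
        for (Broker replica : metadata.replicas()) {
            System.out.println("replica: " + replica.host() + ":" + replica.port());
        }
        return new SimpleConsumer(leader.host(), leader.port(), 30000, 64 * 1024, clientId);
    }
}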

Aggregations

PartitionMetadata (kafka.javaapi.PartitionMetadata) 12
SimpleConsumer (kafka.javaapi.consumer.SimpleConsumer) 8
TopicMetadata (kafka.javaapi.TopicMetadata) 7
TopicMetadataRequest (kafka.javaapi.TopicMetadataRequest) 6
ArrayList (java.util.ArrayList) 4
HashMap (java.util.HashMap) 4
Broker (kafka.cluster.Broker) 4
IOException (java.io.IOException) 3
LinkedList (java.util.LinkedList) 3
List (java.util.List) 3
ConnectException (java.net.ConnectException) 2
SocketTimeoutException (java.net.SocketTimeoutException) 2
UnresolvedAddressException (java.nio.channels.UnresolvedAddressException) 2
Map (java.util.Map) 2
FetchRequest (kafka.api.FetchRequest) 2
FetchRequestBuilder (kafka.api.FetchRequestBuilder) 2
KafkaException (kafka.common.KafkaException) 2
FetchResponse (kafka.javaapi.FetchResponse) 2
TopicMetadataResponse (kafka.javaapi.TopicMetadataResponse) 2
MessageAndOffset (kafka.message.MessageAndOffset) 2