
Example 6 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project druid by druid-io.

From the class KafkaSimpleConsumer, method findNewLeader:

private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
    long retryCnt = 0;
    while (true) {
        PartitionMetadata metadata = findLeader();
        if (metadata != null) {
            replicaBrokers.clear();
            for (Broker replica : metadata.replicas()) {
                replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
            }
            log.debug("Got new Kafka leader metadata : [%s], previous leader : [%s]", metadata, oldLeader);
            Broker newLeader = metadata.leader();
            if (newLeader != null) {
                // in case ZooKeeper doesn't get updated fast enough
                if (oldLeader == null || isValidNewLeader(newLeader) || retryCnt != 0) {
                    return newLeader;
                }
            }
        }
        Thread.sleep(RETRY_INTERVAL);
        retryCnt++;
        // after repeated failures, fall back to searching all known brokers for a leader
        if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
            log.warn("cannot find leader for [%s] - [%s] after [%s] retries", topic, partitionId, retryCnt);
            replicaBrokers.clear();
            replicaBrokers.addAll(allBrokers);
        }
    }
}
Also used: Broker (kafka.cluster.Broker), PartitionMetadata (kafka.javaapi.PartitionMetadata)
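
The findLeader() helper this retry loop depends on is not shown in the excerpt. Below is a minimal sketch of how such a lookup is commonly written against the kafka.javaapi SimpleConsumer metadata API; the method name, broker iteration, timeouts, and client id are assumptions, not druid's actual code.

import java.util.Collections;
import com.google.common.net.HostAndPort;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Hypothetical sketch of a findLeader() helper: ask each candidate broker for topic
// metadata and return the PartitionMetadata of the requested partition, or null.
private PartitionMetadata findLeaderSketch(Iterable<HostAndPort> candidateBrokers, String topic, int partitionId) {
    for (HostAndPort broker : candidateBrokers) {
        SimpleConsumer consumer = null;
        try {
            // timeouts, buffer size, and client id are assumed values, not druid's configuration
            consumer = new SimpleConsumer(broker.getHostText(), broker.getPort(), 100000, 64 * 1024, "leaderLookup");
            TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));
            for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partitionId) {
                        return partitionMetadata;
                    }
                }
            }
        } catch (Exception e) {
            // this broker is unreachable or has no metadata; try the next one
        } finally {
            if (consumer != null) {
                consumer.close();
            }
        }
    }
    return null;
}

Returning null here is what drives the retry loop above: the caller sleeps, widens the broker set after a few attempts, and tries again.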

Example 7 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project presto by prestodb.

From the class KafkaSplitManager, method getSplits:

@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout) {
    KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
    SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));
    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
        for (PartitionMetadata part : metadata.partitionsMetadata()) {
            log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
            Broker leader = part.leader();
            if (leader == null) {
                // Leader election going on...
                log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
                continue;
            }
            HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
            SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
            // Kafka contains a reverse list of "end - start" pairs for the splits
            long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
            for (int i = offsets.length - 1; i > 0; i--) {
                KafkaSplit split = new KafkaSplit(connectorId, metadata.topic(), kafkaTableHandle.getKeyDataFormat(), kafkaTableHandle.getMessageDataFormat(), part.partitionId(), offsets[i], offsets[i - 1], partitionLeader);
                splits.add(split);
            }
        }
    }
    return new FixedSplitSource(splits.build());
}
Also used: Broker (kafka.cluster.Broker), ImmutableList (com.google.common.collect.ImmutableList), TopicMetadataRequest (kafka.javaapi.TopicMetadataRequest), TopicMetadataResponse (kafka.javaapi.TopicMetadataResponse), HostAddress (com.facebook.presto.spi.HostAddress), TopicMetadata (kafka.javaapi.TopicMetadata), FixedSplitSource (com.facebook.presto.spi.FixedSplitSource), PartitionMetadata (kafka.javaapi.PartitionMetadata), ConnectorSplit (com.facebook.presto.spi.ConnectorSplit), SimpleConsumer (kafka.javaapi.consumer.SimpleConsumer)
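
findAllOffsets() is also outside the excerpt. As a rough sketch of how it can be implemented with the SimpleConsumer offset API (the method name, error handling, and client-id choice here are assumptions), the leader is asked for every segment boundary of the partition, newest first, which is the "reverse list" the loop above walks backwards:

import com.google.common.collect.ImmutableMap;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Hypothetical sketch: fetch all available offsets of a partition from its leader.
private static long[] findAllOffsetsSketch(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
    // LatestTime() plus a large maxNumOffsets returns the offsets of all log segments, newest first
    PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(
            ImmutableMap.of(topicAndPartition, requestInfo),
            kafka.api.OffsetRequest.CurrentVersion(),
            consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    if (offsetResponse.hasError()) {
        throw new RuntimeException("Could not fetch offsets for " + topicAndPartition
                + ": error code " + offsetResponse.errorCode(topicName, partitionId));
    }
    return offsetResponse.offsets(topicName, partitionId);
}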

Example 8 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project voltdb by VoltDB.

From the class KafkaTopicPartitionImporter, method findNewLeader:

// Find the leader for this topic partition.
private HostAndPort findNewLeader() {
    for (int i = 0; i < 3; i++) {
        boolean shouldSleep = false;
        PartitionMetadata metadata = findLeader();
        if (metadata == null) {
            shouldSleep = true;
        } else if (metadata.leader() == null) {
            shouldSleep = true;
        } else if (m_config.getPartitionLeader().getHost().equalsIgnoreCase(metadata.leader().host()) && i == 0) {
            // first time through, if the leader hasn't changed, give ZooKeeper a second to recover
            // second time through, assume the broker recovered before failover, or that it was a non-broker issue
            shouldSleep = true;
        } else {
            return new HostAndPort(metadata.leader().host(), metadata.leader().port());
        }
        if (shouldSleep) {
            backoffSleep(i + 1);
        }
    }
    // Unable to find a new leader; return null so the caller can recheck later.
    rateLimitedLog(Level.WARN, null, "Failed to find new leader for " + m_topicAndPartition);
    return null;
}
Also used: HostAndPort (org.voltdb.importclient.kafka.KafkaStreamImporterConfig.HostAndPort), PartitionMetadata (kafka.javaapi.PartitionMetadata)
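
The backoffSleep(i + 1) call is not shown either. A minimal sketch of such a helper, assuming a simple exponential backoff with a cap (the constants and method name are illustrative, not VoltDB's):

// Hypothetical sketch: sleep for an exponentially growing interval,
// capped so a few failed leader lookups never stall the importer for long.
private void backoffSleepSketch(int attempt) {
    long baseMillis = 200;   // assumed base interval
    long capMillis = 5_000;  // assumed upper bound
    long sleepMillis = Math.min(capMillis, baseMillis * (1L << Math.min(attempt, 10)));
    try {
        Thread.sleep(sleepMillis);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt for the caller
    }
}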

Example 9 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project apex-malhar by apache.

From the class AbstractExactlyOnceKafkaOutputOperator, method initializeLastProcessingOffset:

private void initializeLastProcessingOffset() {
    // read last received kafka message
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String) getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }
    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);
    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
        FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(req);
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
Also used: Message (kafka.message.Message), KeyedMessage (kafka.producer.KeyedMessage), FetchResponse (kafka.javaapi.FetchResponse), MessageAndOffset (kafka.message.MessageAndOffset), ByteBuffer (java.nio.ByteBuffer), TopicMetadata (kafka.javaapi.TopicMetadata), FetchRequestBuilder (kafka.api.FetchRequestBuilder), FetchRequest (kafka.api.FetchRequest), PartitionMetadata (kafka.javaapi.PartitionMetadata), SimpleConsumer (kafka.javaapi.consumer.SimpleConsumer), Pair (com.datatorrent.common.util.Pair)
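
One caveat when adapting the copy step in the fetch loop above: kafka.message.Message.key() returns null for unkeyed messages, and payload() can be null as well. A defensive variant of the byte copying could look like the sketch below; the helper name is illustrative and not part of the apex-malhar code.

import java.nio.ByteBuffer;
import kafka.message.Message;

// Hypothetical helper: copy a message's key and payload into byte arrays,
// tolerating unkeyed messages (key() == null) and null payloads.
private static byte[][] copyKeyAndValue(Message message) {
    ByteBuffer payload = message.payload();
    ByteBuffer key = message.key();
    byte[] valueBytes = new byte[payload == null ? 0 : payload.limit()];
    byte[] keyBytes = new byte[key == null ? 0 : key.limit()];
    if (payload != null) {
        payload.get(valueBytes);
    }
    if (key != null) {
        key.get(keyBytes);
    }
    return new byte[][] { keyBytes, valueBytes };
}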

Example 10 with PartitionMetadata

Use of kafka.javaapi.PartitionMetadata in project opennms by OpenNMS.

From the class KafkaOffsetProvider, method getLastOffset:

public long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime) {
    long lastOffset = 0;
    try {
        List<String> topics = Collections.singletonList(topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse topicMetadataResponse = consumer.send(req);
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        for (TopicMetadata topicMetadata : topicMetadataResponse.topicsMetadata()) {
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition) {
                    String partitionHost = partitionMetadata.leader().host();
                    consumer = getConsumer(partitionHost, partitionMetadata.leader().port());
                    break;
                }
            }
        }
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), KafkaOffsetConstants.CLIENT_NAME);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            LOGGER.error("Error fetching offset data from the broker. Reason: {}", response.errorCode(topic, partition));
            // return early: the offsets array may be empty or stale when the broker reports an error
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        lastOffset = offsets[0];
    } catch (Exception e) {
        LOGGER.error("Error while collecting the log Size for topic: {}:{} ", topic, partition, e);
    }
    return lastOffset;
}
Also used: OffsetResponse (kafka.javaapi.OffsetResponse), HashMap (java.util.HashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), TopicMetadataRequest (kafka.javaapi.TopicMetadataRequest), TopicMetadata (kafka.javaapi.TopicMetadata), PartitionOffsetRequestInfo (kafka.api.PartitionOffsetRequestInfo), PartitionMetadata (kafka.javaapi.PartitionMetadata), TopicAndPartition (kafka.common.TopicAndPartition)
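
A typical caller passes one of the two sentinel times from kafka.api.OffsetRequest, LatestTime() for the log-end offset or EarliestTime() for the start of the log. The sketch below, assuming it lives inside KafkaOffsetProvider next to getLastOffset (the method name and the committed-offset source are assumptions), shows how the result can be combined with a committed offset to estimate consumer lag.

// Hypothetical usage inside KafkaOffsetProvider: estimate a consumer group's lag
// on one partition, given an externally obtained committed offset.
public long estimateLag(SimpleConsumer consumer, String topic, int partition, long committedOffset) {
    // LatestTime() yields the log-end offset; EarliestTime() would yield the start of the log instead
    long logEndOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime());
    return Math.max(0L, logEndOffset - committedOffset);
}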

Aggregations

PartitionMetadata (kafka.javaapi.PartitionMetadata): 12
SimpleConsumer (kafka.javaapi.consumer.SimpleConsumer): 8
TopicMetadata (kafka.javaapi.TopicMetadata): 7
TopicMetadataRequest (kafka.javaapi.TopicMetadataRequest): 6
ArrayList (java.util.ArrayList): 4
HashMap (java.util.HashMap): 4
Broker (kafka.cluster.Broker): 4
IOException (java.io.IOException): 3
LinkedList (java.util.LinkedList): 3
List (java.util.List): 3
ConnectException (java.net.ConnectException): 2
SocketTimeoutException (java.net.SocketTimeoutException): 2
UnresolvedAddressException (java.nio.channels.UnresolvedAddressException): 2
Map (java.util.Map): 2
FetchRequest (kafka.api.FetchRequest): 2
FetchRequestBuilder (kafka.api.FetchRequestBuilder): 2
KafkaException (kafka.common.KafkaException): 2
FetchResponse (kafka.javaapi.FetchResponse): 2
TopicMetadataResponse (kafka.javaapi.TopicMetadataResponse): 2
MessageAndOffset (kafka.message.MessageAndOffset): 2