
Example 11 with SimpleConsumer

use of kafka.javaapi.consumer.SimpleConsumer in project flink by apache.

the class FlinkKafkaConsumer08 method getPartitionsForTopic.

// ------------------------------------------------------------------------
//  Kafka / ZooKeeper communication utilities
// ------------------------------------------------------------------------
/**
 * Send a request to Kafka to get the partitions for the given topics.
 *
 * @param topics The names of the topics.
 * @param properties The properties for the Kafka consumer that is used to query the partitions for the topics.
 */
public static List<KafkaTopicPartitionLeader> getPartitionsForTopic(List<String> topics, Properties properties) {
    String seedBrokersConfString = properties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
    final int numRetries = getInt(properties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
    checkNotNull(seedBrokersConfString, "Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
    String[] seedBrokers = seedBrokersConfString.split(",");
    List<KafkaTopicPartitionLeader> partitions = new ArrayList<>();
    final String clientId = "flink-kafka-consumer-partition-lookup";
    final int soTimeout = getInt(properties, "socket.timeout.ms", 30000);
    final int bufferSize = getInt(properties, "socket.receive.buffer.bytes", 65536);
    Random rnd = new Random();
    retryLoop: for (int retry = 0; retry < numRetries; retry++) {
        // we pick a seed broker randomly to avoid overloading the first broker with all the requests when the
        // parallel source instances start. Still, we try all available brokers.
        int index = rnd.nextInt(seedBrokers.length);
        brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokers.length; arrIdx++) {
            String seedBroker = seedBrokers[index];
            LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBroker, retry, numRetries);
            if (++index == seedBrokers.length) {
                index = 0;
            }
            URL brokerUrl = NetUtils.getCorrectHostnamePort(seedBroker);
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(brokerUrl.getHost(), brokerUrl.getPort(), soTimeout, bufferSize, clientId);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
                List<TopicMetadata> metaData = resp.topicsMetadata();
                // clear in case we have an incomplete list from previous tries
                partitions.clear();
                for (TopicMetadata item : metaData) {
                    if (item.errorCode() != ErrorMapping.NoError()) {
                        // warn and try more brokers
                        LOG.warn("Error while getting metadata from broker " + seedBroker + " to find partitions " + "for " + topics.toString() + ". Error: " + ErrorMapping.exceptionFor(item.errorCode()).getMessage());
                        continue brokersLoop;
                    }
                    if (!topics.contains(item.topic())) {
                        LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");
                        continue brokersLoop;
                    }
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        Node leader = brokerToNode(part.leader());
                        KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
                        KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
                        partitions.add(pInfo);
                    }
                }
                // leave the loop through the brokers
                break retryLoop;
            } catch (Exception e) {
                // validate the seed brokers in case of a ClosedChannelException
                validateSeedBrokers(seedBrokers, e);
                LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}", seedBroker, topics, e.getClass().getName(), e.getMessage());
                LOG.debug("Detailed trace", e);
                // we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e1) {
                    // interrupted during back-off: restore the interrupt flag and continue with a shorter sleep
                    Thread.currentThread().interrupt();
                }
            } finally {
                if (consumer != null) {
                    consumer.close();
                }
            }
        } // brokers loop
    } // retries loop
    return partitions;
}
Also used : TopicMetadataRequest(kafka.javaapi.TopicMetadataRequest) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) URL(java.net.URL) ClosedChannelException(java.nio.channels.ClosedChannelException) UnknownHostException(java.net.UnknownHostException) TopicMetadata(kafka.javaapi.TopicMetadata) Random(java.util.Random) KafkaTopicPartitionLeader(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionLeader) PartitionMetadata(kafka.javaapi.PartitionMetadata) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer)
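
For orientation, a minimal caller sketch (not part of the Flink source): the broker addresses and topic name below are placeholders, and Properties/Collections come from java.util.

Properties props = new Properties();
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9092");
// query partition metadata for a single topic; the method retries across the seed brokers
List<KafkaTopicPartitionLeader> partitions =
        FlinkKafkaConsumer08.getPartitionsForTopic(Collections.singletonList("my-topic"), props);
for (KafkaTopicPartitionLeader partition : partitions) {
    System.out.println(partition);
}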

Example 12 with SimpleConsumer

use of kafka.javaapi.consumer.SimpleConsumer in project voltdb by VoltDB.

the class KafkaStreamImporterConfig method getConfigsForPartitions.

private static Map<URI, KafkaStreamImporterConfig> getConfigsForPartitions(String key, List<HostAndPort> brokerList, final String topic, String groupId, String procedure, int soTimeout, int fetchSize, String commitPolicy, FormatterBuilder formatterBuilder) {
    SimpleConsumer consumer = null;
    Map<URI, KafkaStreamImporterConfig> configs = new HashMap<>();
    List<FailedMetaDataAttempt> attempts = new ArrayList<>();
    Iterator<HostAndPort> hpitr = brokerList.iterator();
    while (configs.isEmpty() && hpitr.hasNext()) {
        HostAndPort hp = hpitr.next();
        try {
            consumer = new SimpleConsumer(hp.getHost(), hp.getPort(), soTimeout, fetchSize, CLIENT_ID);
            TopicMetadataRequest req = new TopicMetadataRequest(singletonList(topic));
            kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
            List<TopicMetadata> metaData = resp.topicsMetadata();
            if (metaData == null) {
                attempts.add(new FailedMetaDataAttempt("Failed to get topic metadata for topic " + topic + " from host " + hp.getHost(), null));
                closeConsumer(consumer);
                consumer = null;
                continue;
            }
            int partitionCount = 0;
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    ++partitionCount;
                    URI uri;
                    try {
                        uri = new URI("kafka", key, topic + "/partition/" + part.partitionId());
                    } catch (URISyntaxException ex) {
                        // Should not happen
                        throw new KafkaConfigurationException("unable to create topic resource URI", ex);
                    }
                    Broker leader = part.leader();
                    if (leader == null) {
                        attempts.add(new FailedMetaDataAttempt("Failed to get leader broker for topic " + topic + " partition " + part.partitionId() + " from host " + hp.getHost(), null));
                        continue;
                    }
                    KafkaStreamImporterConfig config = new KafkaStreamImporterConfig(uri, brokerList, topic, part.partitionId(), new HostAndPort(leader.host(), leader.port()), groupId, fetchSize, soTimeout, procedure, commitPolicy, formatterBuilder);
                    configs.put(uri, config);
                }
            }
            if (configs.size() != partitionCount) {
                configs.clear();
                closeConsumer(consumer);
                consumer = null;
            }
        } catch (Exception e) {
            attempts.add(new FailedMetaDataAttempt("Failed to send topic metadata request for topic " + topic + " from host " + hp.getHost(), e));
        } finally {
            closeConsumer(consumer);
        }
    }
    if (!attempts.isEmpty()) {
        attempts.forEach(FailedMetaDataAttempt::log);
        attempts.clear();
        if (configs.isEmpty()) {
            throw new KafkaConfigurationException("Failed to get topic metadata for %s", topic);
        }
    }
    return configs;
}
Also used : Broker(kafka.cluster.Broker) HashMap(java.util.HashMap) TopicMetadataRequest(kafka.javaapi.TopicMetadataRequest) ArrayList(java.util.ArrayList) URISyntaxException(java.net.URISyntaxException) URI(java.net.URI) URISyntaxException(java.net.URISyntaxException) ImportBaseException(org.voltdb.importclient.ImportBaseException) TopicMetadata(kafka.javaapi.TopicMetadata) PartitionMetadata(kafka.javaapi.PartitionMetadata) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer)
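
A hedged sketch of a call site for this private helper (the broker list, topic, group, procedure name, and commit policy are illustrative, and formatterBuilder is assumed to be an in-scope FormatterBuilder):

List<HostAndPort> brokers = Arrays.asList(
        new HostAndPort("broker1", 9092),
        new HostAndPort("broker2", 9092));
Map<URI, KafkaStreamImporterConfig> configs = getConfigsForPartitions(
        "cluster-key", brokers, "my-topic", "my-group", "MyInsertProcedure",
        30000 /* soTimeout */, 65536 /* fetchSize */, "TIME" /* commitPolicy, illustrative */, formatterBuilder);
// one importer config per partition, keyed by a synthetic kafka: URI
for (URI uri : configs.keySet()) {
    System.out.println("importer channel: " + uri);
}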

Example 13 with SimpleConsumer

use of kafka.javaapi.consumer.SimpleConsumer in project voltdb by VoltDB.

the class KafkaTopicPartitionImporter method resetLeader.

private void resetLeader() {
    KafkaStreamImporterConfig.closeConsumer(m_consumer);
    m_consumer = null;
    HostAndPort leaderBroker = findNewLeader();
    if (leaderBroker == null) {
        // fall back to the original leader; the next fetch will fail and bring us back here to retry
        rateLimitedLog(Level.WARN, null, "Fetch failed to find a new leader; continuing with old leader: " + m_config.getPartitionLeader());
        leaderBroker = m_config.getPartitionLeader();
    } else if (!leaderBroker.equals(m_config.getPartitionLeader())) {
        info(null, "Fetch found new leader for " + m_topicAndPartition + ". New leader: " + leaderBroker);
        m_config.setPartitionLeader(leaderBroker);
    }
    m_consumer = new SimpleConsumer(leaderBroker.getHost(), leaderBroker.getPort(), m_config.getSocketTimeout(), m_config.getFetchSize(), KafkaStreamImporterConfig.CLIENT_ID);
}
Also used : HostAndPort(org.voltdb.importclient.kafka.KafkaStreamImporterConfig.HostAndPort) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer)
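
resetLeader is invoked from the fetch path when a request fails; a hedged sketch of that pattern using the kafka.javaapi API shown in these examples (currentOffset is a hypothetical local tracking the next offset to read):

FetchRequest req = new FetchRequestBuilder()
        .clientId(KafkaStreamImporterConfig.CLIENT_ID)
        .addFetch(m_topicAndPartition.topic(), m_topicAndPartition.partition(),
                currentOffset, m_config.getFetchSize())
        .build();
FetchResponse fetchResponse = m_consumer.fetch(req);
if (fetchResponse.hasError()) {
    // on a leadership change (e.g. NotLeaderForPartition), rediscover the leader and reconnect
    resetLeader();
}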

Example 14 with SimpleConsumer

use of kafka.javaapi.consumer.SimpleConsumer in project heron by twitter.

the class StaticPartitionConnections method getConsumer.

public SimpleConsumer getConsumer(int partition) {
    if (!kafka.containsKey(partition)) {
        Broker hp = hosts.getPartitionInformation().getBrokerFor(partition);
        kafka.put(partition, new SimpleConsumer(hp.host, hp.port, config.socketTimeoutMs, config.bufferSizeBytes, config.clientId));
    }
    return kafka.get(partition);
}
Also used : SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer)
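
A hedged usage sketch (connections is a hypothetical StaticPartitionConnections instance; partition 3 and offset 0 are illustrative, and the config fields follow the storm-kafka KafkaConfig this spout is ported from):

SimpleConsumer consumer = connections.getConsumer(3);
FetchRequest request = new FetchRequestBuilder()
        .clientId(config.clientId)
        .addFetch(config.topic, 3, 0L, config.fetchSizeBytes)
        .build();
FetchResponse response = consumer.fetch(request);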

Example 15 with SimpleConsumer

use of kafka.javaapi.consumer.SimpleConsumer in project heron by twitter.

the class KafkaUtilsTest method setup.

@Before
public void setup() {
    broker = new KafkaTestBroker();
    GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
    globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
    brokerHosts = new StaticHosts(globalPartitionInformation);
    config = new KafkaConfig(brokerHosts, TEST_TOPIC);
    simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
}
Also used : GlobalPartitionInformation(org.apache.storm.kafka.trident.GlobalPartitionInformation) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer) Before(org.junit.Before)
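
The matching teardown is not shown in this snippet; a hedged sketch of what it would look like, assuming KafkaTestBroker exposes a shutdown() method as in the storm-kafka test utilities this is based on:

@After
public void shutdown() {
    simpleConsumer.close();
    broker.shutdown();
}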

Aggregations

SimpleConsumer (kafka.javaapi.consumer.SimpleConsumer): 35 usages
PartitionMetadata (kafka.javaapi.PartitionMetadata): 6 usages
IOException (java.io.IOException): 5 usages
HashMap (java.util.HashMap): 5 usages
FetchRequest (kafka.api.FetchRequest): 5 usages
FetchResponse (kafka.javaapi.FetchResponse): 5 usages
TopicMetadata (kafka.javaapi.TopicMetadata): 5 usages
TopicMetadataRequest (kafka.javaapi.TopicMetadataRequest): 5 usages
ArrayList (java.util.ArrayList): 4 usages
FetchRequestBuilder (kafka.api.FetchRequestBuilder): 4 usages
TopicAndPartition (kafka.common.TopicAndPartition): 4 usages
ByteBufferMessageSet (kafka.javaapi.message.ByteBufferMessageSet): 4 usages
Before (org.junit.Before): 4 usages
ConnectException (java.net.ConnectException): 3 usages
SocketTimeoutException (java.net.SocketTimeoutException): 3 usages
UnresolvedAddressException (java.nio.channels.UnresolvedAddressException): 3 usages
List (java.util.List): 3 usages
Map (java.util.Map): 3 usages
PartitionOffsetRequestInfo (kafka.api.PartitionOffsetRequestInfo): 3 usages
KafkaException (kafka.common.KafkaException): 3 usages