
Example 1 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in the apache/kafka project.

From class StreamsResetter, method resetInputAndInternalAndSeekToEndIntermediateTopicOffsets:

private void resetInputAndInternalAndSeekToEndIntermediateTopicOffsets() {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    if (inputTopics.size() == 0 && intermediateTopics.size() == 0) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return;
    } else {
        if (inputTopics.size() != 0) {
            System.out.println("Resetting offsets to zero for input topics " + inputTopics + " and all internal topics.");
        }
        if (intermediateTopics.size() != 0) {
            System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
        }
    }
    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, options.valueOf(bootstrapServerOption));
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, options.valueOf(applicationIdOption));
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            System.err.println("Input topic " + topic + " not found. Skipping.");
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            System.err.println("Intermediate topic " + topic + " not found. Skipping.");
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : allTopics) {
        if (isInternalTopic(topic)) {
            topicsToSubscribe.add(topic);
        }
    }
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        client.subscribe(topicsToSubscribe);
        // The dummy poll joins the consumer group so that a partition assignment is available for the seeks below.
        client.poll(1);
        final Set<TopicPartition> partitions = client.assignment();
        final Set<TopicPartition> inputAndInternalTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic) || isInternalTopic(topic)) {
                inputAndInternalTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }
        if (inputAndInternalTopicPartitions.size() > 0) {
            client.seekToBeginning(inputAndInternalTopicPartitions);
        }
        if (intermediateTopicPartitions.size() > 0) {
            client.seekToEnd(intermediateTopicPartitions);
        }
        // Calling position() resolves the new offset of every partition so that commitSync() commits the seeked positions.
        for (final TopicPartition p : partitions) {
            client.position(p);
        }
        client.commitSync();
    } catch (final RuntimeException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), HashSet (java.util.HashSet)
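
The pattern above relies on a dummy poll to obtain the partition assignment before seeking and committing. Below is a minimal, self-contained sketch of the same subscribe / poll / seek / commit sequence, not the Kafka tool itself: the broker address, group id, topic name, and class name are placeholders, and it uses the Duration overload of poll available in newer client versions (the tool above uses the older long overload).

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetResetSketch {

    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-streams-app");          // placeholder group id
        config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");         // commit explicitly below

        try (KafkaConsumer<byte[], byte[]> client =
                 new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            client.subscribe(Collections.singletonList("reset-demo-topic"));           // placeholder topic
            client.poll(Duration.ofSeconds(1));            // join the group and obtain a partition assignment
            Set<TopicPartition> partitions = client.assignment();
            client.seekToBeginning(partitions);            // move every assigned partition back to the log start
            for (TopicPartition p : partitions) {
                client.position(p);                        // resolve the new position for each partition
            }
            client.commitSync();                           // commit the reset offsets for the group
        }
    }
}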

Example 2 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in the apache/kafka project.

From class VerifiableConsumer, method createFromArgs:

public static VerifiableConsumer createFromArgs(ArgumentParser parser, String[] args) throws ArgumentParserException {
    Namespace res = parser.parseArgs(args);
    String topic = res.getString("topic");
    boolean useAutoCommit = res.getBoolean("useAutoCommit");
    int maxMessages = res.getInt("maxMessages");
    boolean verbose = res.getBoolean("verbose");
    String configFile = res.getString("consumer.config");
    Properties consumerProps = new Properties();
    if (configFile != null) {
        try {
            consumerProps.putAll(Utils.loadProps(configFile));
        } catch (IOException e) {
            throw new ArgumentParserException(e.getMessage(), parser);
        }
    }
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, res.getString("groupId"));
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, res.getString("brokerList"));
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, useAutoCommit);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, res.getString("resetPolicy"));
    consumerProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(res.getInt("sessionTimeout")));
    consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, res.getString("assignmentStrategy"));
    StringDeserializer deserializer = new StringDeserializer();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer);
    return new VerifiableConsumer(consumer, System.out, topic, maxMessages, useAutoCommit, false, verbose);
}
Also used: StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), IOException (java.io.IOException), ArgumentParserException (net.sourceforge.argparse4j.inf.ArgumentParserException), Properties (java.util.Properties), Namespace (net.sourceforge.argparse4j.inf.Namespace)
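
For reference, here is a hedged sketch of just the consumer-construction part of this pattern, with hand-written Properties in place of parsed arguments. The broker list, group id, reset policy, and session timeout values are placeholders, not the defaults of the VerifiableConsumer argument parser; as in the example, a single StringDeserializer instance is passed for both keys and values.

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class VerifiableConsumerPropsSketch {

    static KafkaConsumer<String, String> buildConsumer() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder broker list
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "verifiable-consumer-group"); // placeholder group id
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);             // assume manual commits
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");         // placeholder reset policy
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");           // placeholder session timeout

        // One StringDeserializer instance is reused for both keys and values, as in the example.
        StringDeserializer deserializer = new StringDeserializer();
        return new KafkaConsumer<>(props, deserializer, deserializer);
    }
}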

Example 3 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in the apache/samza project.

From class TestStreamProcessor, method verifyNumMessages:

/**
   * Consumes data from the topic until there are no new messages for a while
   * and asserts that the number of consumed messages is as expected.
   */
private void verifyNumMessages(String topic, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
                count++;
            }
        } else {
            emptyPollCount++;
        }
    }
    Assert.assertEquals(expectedNumMessages, count);
}
Also used: KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
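
The key idea is to keep polling until several consecutive polls return no records, and to treat that as "no new messages for a while". A minimal standalone sketch of that poll-until-idle loop follows; the class name, group-id scheme, and five-second poll timeout are illustrative assumptions rather than values from the test.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PollUntilIdleSketch {

    // Counts records on a topic until several consecutive polls come back empty.
    static int countMessages(String bootstrapServers, String topic, int maxEmptyPolls) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "verify-" + topic);    // placeholder group id scheme
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");  // read the topic from the beginning

        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList(topic));
            int count = 0;
            int emptyPolls = 0;
            while (emptyPolls < maxEmptyPolls) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
                if (records.isEmpty()) {
                    emptyPolls++;          // another idle poll
                } else {
                    emptyPolls = 0;        // reset the idle counter whenever progress is made
                    count += records.count();
                }
            }
            return count;
        }
    }
}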

Example 4 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in the apache/samza project.

From class TestZkStreamProcessorBase, method verifyNumMessages:

/**
   * Consumes data from the topic until there are no new messages for a while
   * and asserts that the number of consumed messages is as expected.
   */
protected void verifyNumMessages(String topic, final Map<Integer, Boolean> expectedValues, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    Map<Integer, Boolean> map = new HashMap<>(expectedValues);
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                String val = new String((byte[]) record.value());
                LOG.info("Got value " + val + "; count = " + count + "; out of " + expectedNumMessages);
                Integer valI = Integer.valueOf(val);
                if (valI < BAD_MESSAGE_KEY) {
                    map.put(valI, true);
                    count++;
                }
            }
        } else {
            emptyPollCount++;
            LOG.warn("empty polls " + emptyPollCount);
        }
    }
    // count the expected values that were never received
    long numFalse = map.values().stream().filter(v -> !v).count();
    Assert.assertEquals("did not receive all expected events", 0, numFalse);
    Assert.assertEquals(expectedNumMessages, count);
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), JobCoordinator (org.apache.samza.coordinator.JobCoordinator), LoggerFactory (org.slf4j.LoggerFactory), HashMap (java.util.HashMap), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), TestZkUtils (org.apache.samza.zk.TestZkUtils), InitableTask (org.apache.samza.task.InitableTask), MessageCollector (org.apache.samza.task.MessageCollector), SystemStream (org.apache.samza.system.SystemStream), Map (java.util.Map), StreamTask (org.apache.samza.task.StreamTask), ApplicationConfig (org.apache.samza.config.ApplicationConfig), StandaloneTestUtils (org.apache.samza.test.StandaloneTestUtils), MapConfig (org.apache.samza.config.MapConfig), Before (org.junit.Before), TaskContext (org.apache.samza.task.TaskContext), Util (org.apache.samza.util.Util), Properties (java.util.Properties), IncomingMessageEnvelope (org.apache.samza.system.IncomingMessageEnvelope), Logger (org.slf4j.Logger), Iterator (java.util.Iterator), StreamTaskFactory (org.apache.samza.task.StreamTaskFactory), ZkConfig (org.apache.samza.config.ZkConfig), TaskCoordinator (org.apache.samza.task.TaskCoordinator), ExecutionException (java.util.concurrent.ExecutionException), CountDownLatch (java.util.concurrent.CountDownLatch), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TestUtils (kafka.utils.TestUtils), OutgoingMessageEnvelope (org.apache.samza.system.OutgoingMessageEnvelope), JobCoordinatorConfig (org.apache.samza.config.JobCoordinatorConfig), Config (org.apache.samza.config.Config), StandaloneIntegrationTestHarness (org.apache.samza.test.StandaloneIntegrationTestHarness), Assert (org.junit.Assert), Collections (java.util.Collections), JobCoordinatorFactory (org.apache.samza.coordinator.JobCoordinatorFactory), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer)
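
Beyond counting, this variant also tracks which expected values actually arrived by flipping entries of a Map<Integer, Boolean> to true. The short sketch below isolates just that bookkeeping step; the class name, method name, and signature are made up for illustration.

import java.util.HashMap;
import java.util.Map;

public class ExpectedValuesCheckSketch {

    // Copies the expected-values map, marks every observed value as seen,
    // and returns how many expected values were never observed.
    static long countMissing(Map<Integer, Boolean> expectedValues, Iterable<Integer> observedValues) {
        Map<Integer, Boolean> seen = new HashMap<>(expectedValues);
        for (Integer value : observedValues) {
            seen.put(value, true);
        }
        return seen.values().stream().filter(v -> !v).count();
    }
}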

Example 5 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in the apache/kafka project.

From class ClientCompatibilityTest, method testConsume:

public void testConsume(final long prodTimeMs) throws Exception {
    Properties consumerProps = new Properties();
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, testConfig.bootstrapServer);
    consumerProps.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 512);
    ClientCompatibilityTestDeserializer deserializer = new ClientCompatibilityTestDeserializer(testConfig.expectClusterId);
    final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer);
    final List<PartitionInfo> partitionInfos = consumer.partitionsFor(testConfig.topic);
    if (partitionInfos.size() < 1)
        throw new RuntimeException("Expected at least one partition for topic " + testConfig.topic);
    final Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
    final LinkedList<TopicPartition> topicPartitions = new LinkedList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
        timestampsToSearch.put(topicPartition, prodTimeMs);
        topicPartitions.add(topicPartition);
    }
    final OffsetsForTime offsetsForTime = new OffsetsForTime();
    tryFeature("offsetsForTimes", testConfig.offsetsForTimesSupported, new Runnable() {

        @Override
        public void run() {
            offsetsForTime.result = consumer.offsetsForTimes(timestampsToSearch);
        }
    }, new Runnable() {

        @Override
        public void run() {
            log.info("offsetsForTime = {}", offsetsForTime.result);
        }
    });
    // Whether or not offsetsForTimes works, beginningOffsets and endOffsets
    // should work.
    consumer.beginningOffsets(timestampsToSearch.keySet());
    consumer.endOffsets(timestampsToSearch.keySet());
    consumer.assign(topicPartitions);
    consumer.seekToBeginning(topicPartitions);
    final Iterator<byte[]> iter = new Iterator<byte[]>() {

        private static final int TIMEOUT_MS = 10000;

        private Iterator<ConsumerRecord<byte[], byte[]>> recordIter = null;

        private byte[] next = null;

        private byte[] fetchNext() {
            while (true) {
                long curTime = Time.SYSTEM.milliseconds();
                if (curTime - prodTimeMs > TIMEOUT_MS)
                    throw new RuntimeException("Timed out after " + TIMEOUT_MS + " ms.");
                if (recordIter == null) {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
                    recordIter = records.iterator();
                }
                if (recordIter.hasNext())
                    return recordIter.next().value();
                recordIter = null;
            }
        }

        @Override
        public boolean hasNext() {
            if (next != null)
                return true;
            next = fetchNext();
            return next != null;
        }

        @Override
        public byte[] next() {
            if (!hasNext())
                throw new NoSuchElementException();
            byte[] cur = next;
            next = null;
            return cur;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
    byte[] next = iter.next();
    try {
        compareArrays(message1, next);
        log.debug("Found first message...");
    } catch (RuntimeException e) {
        throw new RuntimeException("The first message in this topic was not ours. Please use a new topic when " + "running this program.");
    }
    try {
        next = iter.next();
        if (testConfig.expectRecordTooLargeException)
            throw new RuntimeException("Expected to get a RecordTooLargeException when reading a record " + "bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG);
        try {
            compareArrays(message2, next);
        } catch (RuntimeException e) {
            System.out.println("The second message in this topic was not ours. Please use a new " + "topic when running this program.");
            Exit.exit(1);
        }
    } catch (RecordTooLargeException e) {
        log.debug("Got RecordTooLargeException", e);
        if (!testConfig.expectRecordTooLargeException)
            throw new RuntimeException("Got an unexpected RecordTooLargeException when reading a record " + "bigger than " + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG);
    }
    log.debug("Closing consumer.");
    consumer.close();
    log.info("Closed consumer.");
}
Also used: HashMap (java.util.HashMap), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties), LinkedList (java.util.LinkedList), TopicPartition (org.apache.kafka.common.TopicPartition), Iterator (java.util.Iterator), PartitionInfo (org.apache.kafka.common.PartitionInfo), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), NoSuchElementException (java.util.NoSuchElementException)
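
The offset-lookup calls used here (offsetsForTimes, beginningOffsets, endOffsets) can also be exercised outside the compatibility test. The sketch below performs the same three lookups for every partition of a topic; the broker address, group id, topic name, class name, and one-hour lookback are assumptions made for the example.

import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetLookupSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "offset-lookup-demo");      // placeholder group id

        long timestamp = System.currentTimeMillis() - Duration.ofHours(1).toMillis(); // look back one hour

        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
            List<PartitionInfo> partitions = consumer.partitionsFor("demo-topic"); // placeholder topic
            for (PartitionInfo info : partitions) {
                timestampsToSearch.put(new TopicPartition(info.topic(), info.partition()), timestamp);
            }

            // Earliest offset whose record timestamp is >= the requested timestamp (null if none).
            Map<TopicPartition, OffsetAndTimestamp> byTime = consumer.offsetsForTimes(timestampsToSearch);
            // Current log start and end offsets, independent of timestamps.
            Map<TopicPartition, Long> beginning = consumer.beginningOffsets(timestampsToSearch.keySet());
            Map<TopicPartition, Long> end = consumer.endOffsets(timestampsToSearch.keySet());

            byTime.forEach((tp, offset) -> System.out.printf("%s: byTime=%s begin=%d end=%d%n",
                    tp, offset, beginning.get(tp), end.get(tp)));
        }
    }
}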

Aggregations

KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 11
Properties (java.util.Properties): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 4
HashMap (java.util.HashMap): 3
Map (java.util.Map): 3
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 3
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 3
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 3
HashSet (java.util.HashSet): 2
Iterator (java.util.Iterator): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
IOException (java.io.IOException): 1
Collections (java.util.Collections): 1
LinkedList (java.util.LinkedList): 1
NoSuchElementException (java.util.NoSuchElementException): 1
Set (java.util.Set): 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1
ExecutionException (java.util.concurrent.ExecutionException): 1