Example 11 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in project kafka by apache.

From the class BrokerCompatibilityTest, method loopUntilRecordReceived:

private static void loopUntilRecordReceived(final String kafka) {
    final Properties consumerProperties = new Properties();
    consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "broker-compatibility-consumer");
    consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Collections.singletonList(SINK_TOPIC));
    // Poll until the expected record arrives. Note: poll(long) is the overload that
    // was deprecated in Kafka 2.0 in favor of poll(Duration); see the sketch below.
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            if (record.key().equals("key") && record.value().equals("value")) {
                consumer.close();
                return;
            }
        }
    }
}
Also used: KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties)
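
For reference, a minimal sketch of the same receive loop against the newer poll(Duration) API introduced in Kafka 2.0; the sink topic is passed in as a parameter here as a hypothetical stand-in for the SINK_TOPIC constant above:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

private static void loopUntilRecordReceived(final String kafka, final String sinkTopic) {
    final Properties consumerProperties = new Properties();
    consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "broker-compatibility-consumer");
    consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    // try-with-resources closes the consumer even if poll() throws.
    try (final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties)) {
        consumer.subscribe(Collections.singletonList(sinkTopic));
        while (true) {
            final ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (final ConsumerRecord<String, String> record : records) {
                if (record.key().equals("key") && record.value().equals("value")) {
                    return;
                }
            }
        }
    }
}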

Example 12 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in project kafka by apache.

From the class SmokeTestDriver, method verify:

public static void verify(String kafka, Map<String, Set<Integer>> allData, int maxRecordsPerKey) {
    Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);
    List<TopicPartition> partitions = getAllPartitions(consumer, "echo", "max", "min", "dif", "sum", "cnt", "avg", "wcnt", "tagg");
    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);
    final int recordsGenerated = allData.size() * maxRecordsPerKey;
    int recordsProcessed = 0;
    HashMap<String, Integer> max = new HashMap<>();
    HashMap<String, Integer> min = new HashMap<>();
    HashMap<String, Integer> dif = new HashMap<>();
    HashMap<String, Long> sum = new HashMap<>();
    HashMap<String, Long> cnt = new HashMap<>();
    HashMap<String, Double> avg = new HashMap<>();
    HashMap<String, Long> wcnt = new HashMap<>();
    HashMap<String, Long> tagg = new HashMap<>();
    HashSet<String> keys = new HashSet<>();
    HashMap<String, Set<Integer>> received = new HashMap<>();
    for (String key : allData.keySet()) {
        keys.add(key);
        received.put(key, new HashSet<Integer>());
    }
    int retry = 0;
    final long start = System.currentTimeMillis();
    // Poll for up to three minutes, or until all aggregates verify and the stream is drained.
    while (System.currentTimeMillis() - start < TimeUnit.MINUTES.toMillis(3)) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(500);
        if (records.isEmpty() && recordsProcessed >= recordsGenerated) {
            if (verifyMin(min, allData, false) && verifyMax(max, allData, false) && verifyDif(dif, allData, false) && verifySum(sum, allData, false) && verifyCnt(cnt, allData, false) && verifyAvg(avg, allData, false) && verifyTAgg(tagg, allData, false)) {
                break;
            }
            if (retry++ > MAX_RECORD_EMPTY_RETRIES) {
                break;
            }
        } else {
            for (ConsumerRecord<byte[], byte[]> record : records) {
                String key = stringSerde.deserializer().deserialize("", record.key());
                switch(record.topic()) {
                    case "echo":
                        Integer value = intSerde.deserializer().deserialize("", record.value());
                        recordsProcessed++;
                        if (recordsProcessed % 100 == 0) {
                            System.out.println("Echo records processed = " + recordsProcessed);
                        }
                        received.get(key).add(value);
                        break;
                    case "min":
                        min.put(key, intSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "max":
                        max.put(key, intSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "dif":
                        dif.put(key, intSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "sum":
                        sum.put(key, longSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "cnt":
                        cnt.put(key, longSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "avg":
                        avg.put(key, doubleSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "wcnt":
                        wcnt.put(key, longSerde.deserializer().deserialize("", record.value()));
                        break;
                    case "tagg":
                        tagg.put(key, longSerde.deserializer().deserialize("", record.value()));
                        break;
                    default:
                        System.out.println("unknown topic: " + record.topic());
                }
            }
        }
    }
    consumer.close();
    final long finished = System.currentTimeMillis() - start;
    System.out.println("Verification time=" + finished);
    System.out.println("-------------------");
    System.out.println("Result Verification");
    System.out.println("-------------------");
    System.out.println("recordGenerated=" + recordsGenerated);
    System.out.println("recordProcessed=" + recordsProcessed);
    if (recordsProcessed > recordsGenerated) {
        System.out.println("PROCESSED-MORE-THAN-GENERATED");
    } else if (recordsProcessed < recordsGenerated) {
        System.out.println("PROCESSED-LESS-THAN-GENERATED");
    }
    boolean success = allData.equals(received);
    if (success) {
        System.out.println("ALL-RECORDS-DELIVERED");
    } else {
        int missedCount = 0;
        for (Map.Entry<String, Set<Integer>> entry : allData.entrySet()) {
            // Missed = values generated for the key minus the values actually received.
            missedCount += entry.getValue().size() - received.get(entry.getKey()).size();
        }
        System.out.println("missedRecords=" + missedCount);
    }
    success &= verifyMin(min, allData, true);
    success &= verifyMax(max, allData, true);
    success &= verifyDif(dif, allData, true);
    success &= verifySum(sum, allData, true);
    success &= verifyCnt(cnt, allData, true);
    success &= verifyAvg(avg, allData, true);
    success &= verifyTAgg(tagg, allData, true);
    System.out.println(success ? "SUCCESS" : "FAILURE");
}
Also used: HashSet (java.util.HashSet), Set (java.util.Set), HashMap (java.util.HashMap), Properties (java.util.Properties), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), TopicPartition (org.apache.kafka.common.TopicPartition), Map (java.util.Map)
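
The getAllPartitions helper is not shown above. A plausible sketch, assuming it simply resolves every partition of the given topics through KafkaConsumer.partitionsFor; the implementation is inferred from the call site, not copied from the project:

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

private static List<TopicPartition> getAllPartitions(final KafkaConsumer<?, ?> consumer, final String... topics) {
    final List<TopicPartition> partitions = new ArrayList<>();
    for (final String topic : topics) {
        // partitionsFor fetches the topic's partition metadata from the cluster.
        for (final PartitionInfo info : consumer.partitionsFor(topic)) {
            partitions.add(new TopicPartition(info.topic(), info.partition()));
        }
    }
    return partitions;
}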

Example 13 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in project apex-malhar by apache.

From the class KafkaSinglePortExactlyOnceOutputOperator, method KafkaConsumerInit:

private KafkaConsumer KafkaConsumerInit() {
    Properties props = new Properties();
    // Reuse the operator's producer properties for the broker address and value
    // deserializer; the key deserializer is fixed by the operator (KEY_DESERIALIZER).
    props.put(BOOTSTRAP_SERVERS_CONFIG, getProperties().get(BOOTSTRAP_SERVERS_CONFIG));
    props.put(KEY_DESERIALIZER_CLASS_CONFIG, KEY_DESERIALIZER);
    props.put(VALUE_DESERIALIZER_CLASS_CONFIG, getProperties().get(VALUE_DESERIALIZER_CLASS_CONFIG));
    return new KafkaConsumer<>(props);
}
Also used: KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties)
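
Why does an output operator build a consumer at all? In an exactly-once sink, a consumer lets the operator re-read its own output topic after a failure and skip tuples that were already written. A hypothetical illustration of that re-read step; the topic, partition, and checkpointed offset are all assumptions, and the operator's actual recovery logic is not shown here:

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

private ConsumerRecords<String, String> reReadOutput(final KafkaConsumer<String, String> consumer,
        final String topic, final int partition, final long checkpointedOffset) {
    final TopicPartition tp = new TopicPartition(topic, partition);
    // Manual assignment (no group management), then rewind to the checkpointed offset.
    consumer.assign(Collections.singletonList(tp));
    consumer.seek(tp, checkpointedOffset);
    // A single poll for illustration; real recovery code would loop up to the end offset.
    return consumer.poll(1000);
}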

Example 14 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in project ignite by apache.

From the class IgniteSourceConnectorTest, method checkDataDelivered:

/**
 * Checks if events were delivered to Kafka server.
 *
 * @param expectedEventsCnt Expected events count.
 * @throws Exception If failed.
 */
private void checkDataDelivered(final int expectedEventsCnt) throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-grp");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
    props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10000);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.ignite.stream.kafka.connect.serialization.CacheEventDeserializer");
    final KafkaConsumer<String, CacheEvent> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(TOPICS));
    final AtomicInteger evtCnt = new AtomicInteger();
    try {
        // Wait for expected events count.
        GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
                for (ConsumerRecord<String, CacheEvent> record : records) {
                    info("Record: " + record);
                    evtCnt.getAndIncrement();
                }
                return evtCnt.get() >= expectedEventsCnt;
            }
        }, 20_000);
        info("Waiting for unexpected records for 5 secs.");
        assertFalse(GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
                for (ConsumerRecord<String, CacheEvent> record : records) {
                    error("Unexpected record: " + record);
                    evtCnt.getAndIncrement();
                }
                return evtCnt.get() > expectedEventsCnt;
            }
        }, 5_000));
    } catch (WakeupException ignored) {
        // Ignored: thrown by consumer.wakeup() during shutdown.
    } finally {
        consumer.close();
        assertEquals(expectedEventsCnt, evtCnt.get());
    }
}
Also used: GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), Properties (java.util.Properties), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), WakeupException (org.apache.kafka.common.errors.WakeupException), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), CacheEvent (org.apache.ignite.events.CacheEvent)
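
The WakeupException branch above only fires if another thread calls consumer.wakeup(), the one KafkaConsumer method that is documented as safe to invoke from outside the polling thread. A minimal sketch of that shutdown pattern; the loop and thread wiring are illustrative, not taken from the test:

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

static void pollUntilWoken(final KafkaConsumer<String, String> consumer, final String topic) {
    consumer.subscribe(Collections.singletonList(topic));
    try {
        while (true) {
            final ConsumerRecords<String, String> records = consumer.poll(1000);
            records.forEach(record -> System.out.println("Record: " + record));
        }
    } catch (WakeupException expected) {
        // Another thread called consumer.wakeup(); fall through and close.
    } finally {
        consumer.close();
    }
}

// From a shutdown hook or another thread: consumer.wakeup();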

Example 15 with KafkaConsumer

Use of org.apache.kafka.clients.consumer.KafkaConsumer in project streamsx.kafka by IBMStreams.

From the class TransactionalKafkaProducerClient, method getCommittedSequenceIdFromCtrlTopic:

@SuppressWarnings("rawtypes")
private long getCommittedSequenceIdFromCtrlTopic() throws Exception {
    KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(getConsumerProperties());
    HashMap<TopicPartition, Long> endOffsets = getControlTopicEndOffsets(consumer);
    // Move the consumer to the initial offsets to begin consuming from.
    consumer.assign(controlTopicInitialOffsets.keySet());
    controlTopicInitialOffsets.forEach((tp, offset) -> {
        consumer.seek(tp, offset);
    });
    long committedSeqId = 0;
    boolean consumerAtEnd = false;
    while (!consumerAtEnd) {
        ConsumerRecords<?, ?> records = consumer.poll(1000);
        if (logger.isDebugEnabled())
            logger.debug("ConsumerRecords: " + records);
        Iterator<?> it = records.iterator();
        // Records from different partitions can be scrambled. So we cannot assume that the last record returned by the iterator contains the last committed sequence-ID.
        while (it.hasNext()) {
            ConsumerRecord record = (ConsumerRecord) it.next();
            Headers headers = record.headers();
            if (logger.isDebugEnabled())
                logger.debug("Headers: " + headers);
            String tid = new String(headers.lastHeader(TRANSACTION_ID).value(), StandardCharsets.UTF_8);
            if (logger.isDebugEnabled())
                logger.debug("Checking tid=" + tid + " (currentTid=" + getTransactionalId() + "); from " + record.topic() + "-" + record.partition());
            if (tid.equals(getTransactionalId())) {
                long decodedSeqId = Long.valueOf(new String(headers.lastHeader(COMMITTED_SEQUENCE_ID).value(), StandardCharsets.UTF_8));
                if (decodedSeqId > committedSeqId)
                    committedSeqId = decodedSeqId;
            }
        }
        consumerAtEnd = isConsumerAtEnd(consumer, endOffsets);
        if (logger.isDebugEnabled())
            logger.debug("consumerAtEnd=" + consumerAtEnd);
    }
    consumer.close(1L, TimeUnit.SECONDS);
    return committedSeqId;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), Headers (org.apache.kafka.common.header.Headers), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
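
Neither getControlTopicEndOffsets nor isConsumerAtEnd appears in the snippet. A plausible sketch of the end-of-topic check, assuming the end offsets were captured up front (for example via KafkaConsumer.endOffsets) and are compared against the consumer's current positions; the signature is inferred from the call site:

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

private static boolean isConsumerAtEnd(final KafkaConsumer<?, ?> consumer,
        final Map<TopicPartition, Long> endOffsets) {
    for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
        // position() is the offset of the next record the consumer would fetch;
        // the consumer is "at end" once it has reached the captured end offset everywhere.
        if (consumer.position(entry.getKey()) < entry.getValue()) {
            return false;
        }
    }
    return true;
}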

Aggregations

KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 15
Properties (java.util.Properties): 12
TopicPartition (org.apache.kafka.common.TopicPartition): 6
Map (java.util.Map): 4
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 4
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 4
HashMap (java.util.HashMap): 3
HashSet (java.util.HashSet): 3
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 3
ArrayList (java.util.ArrayList): 2
Iterator (java.util.Iterator): 2
Set (java.util.Set): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 2
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 2
PartitionInfo (org.apache.kafka.common.PartitionInfo): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
IOException (java.io.IOException): 1
Collections (java.util.Collections): 1
LinkedList (java.util.LinkedList): 1