
Example 31 with ByteArrayDeserializer

use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project druid by druid-io.

the class KafkaSupervisor method getKafkaConsumer.

private KafkaConsumer<byte[], byte[]> getKafkaConsumer() {
    final Properties props = new Properties();
    props.setProperty("metadata.max.age.ms", "10000");
    props.setProperty("group.id", String.format("kafka-supervisor-%s", getRandomId()));
    props.putAll(ioConfig.getConsumerProperties());
    props.setProperty("enable.auto.commit", "false");
    ClassLoader currCtxCl = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        return new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    } finally {
        Thread.currentThread().setContextClassLoader(currCtxCl);
    }
}
Also used : KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Properties(java.util.Properties) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer)
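
The try/finally dance around the context classloader is the noteworthy part: Kafka's config machinery resolves pluggable classes (deserializers, interceptors, metric reporters) through the thread's context classloader, which inside Druid's extension classloader may not see the Kafka client classes. A minimal sketch of the same guard extracted into a reusable helper, assuming nothing beyond the JDK; the class and method names are hypothetical, not Druid code.

import java.util.concurrent.Callable;

// Hypothetical helper (not part of Druid): run a task with this class's own
// classloader installed as the thread context classloader, restoring the
// previous one afterwards even if the task throws.
final class WithOwnClassLoader {
    static <T> T run(Callable<T> task) throws Exception {
        ClassLoader original = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader(WithOwnClassLoader.class.getClassLoader());
            return task.call();
        } finally {
            Thread.currentThread().setContextClassLoader(original);
        }
    }
}

With such a helper, getKafkaConsumer() above reduces to a single call: WithOwnClassLoader.run(() -> new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())).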

Example 32 with ByteArrayDeserializer

use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

the class KafkaConsumerTest method testInvalidSocketSendBufferSize.

@Test(expected = KafkaException.class)
public void testInvalidSocketSendBufferSize() throws Exception {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ConsumerConfig.SEND_BUFFER_CONFIG, -2);
    new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Test(org.junit.Test)
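
For contrast, -1 is the documented sentinel for send.buffer.bytes meaning "use the OS default", so it passes validation, while anything below -1 is rejected during construction and surfaces wrapped in a KafkaException; no broker at localhost:9999 is ever contacted. A hedged companion sketch in the same test-class style (the test name is mine):

@Test
public void testOsDefaultSocketSendBufferSize() throws Exception {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ConsumerConfig.SEND_BUFFER_CONFIG, -1);
    // -1 means "use the OS default buffer size", so construction succeeds;
    // close immediately since no connection is made during construction.
    new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()).close();
}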

Example 33 with ByteArrayDeserializer

use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project drill by axbaretto.

the class KafkaGroupScan method init.

/**
 * Computes work per topic partition, based on the start and end offsets of
 * each corresponding TopicPartition.
 */
private void init() {
    partitionWorkList = Lists.newArrayList();
    Collection<DrillbitEndpoint> endpoints = kafkaStoragePlugin.getContext().getBits();
    Map<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
    for (DrillbitEndpoint endpoint : endpoints) {
        endpointMap.put(endpoint.getAddress(), endpoint);
    }
    Map<TopicPartition, Long> startOffsetsMap = Maps.newHashMap();
    Map<TopicPartition, Long> endOffsetsMap = Maps.newHashMap();
    List<PartitionInfo> topicPartitions = null;
    String topicName = kafkaScanSpec.getTopicName();
    try (KafkaConsumer<?, ?> kafkaConsumer = new KafkaConsumer<>(kafkaStoragePlugin.getConfig().getKafkaConsumerProps(), new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        if (!kafkaConsumer.listTopics().containsKey(topicName)) {
            throw UserException.dataReadError().message("Table '%s' does not exist", topicName).build(logger);
        }
        kafkaConsumer.subscribe(Arrays.asList(topicName));
        // per the KafkaConsumer JavaDoc, seekToBeginning/seekToEnd evaluate
        // lazily: the seek to the first/last offset in all partitions happens
        // only when poll(long) or position(TopicPartition) is called
        kafkaConsumer.poll(0);
        Set<TopicPartition> assignments = kafkaConsumer.assignment();
        topicPartitions = kafkaConsumer.partitionsFor(topicName);
        // fetch start offsets for each topicPartition
        kafkaConsumer.seekToBeginning(assignments);
        for (TopicPartition topicPartition : assignments) {
            startOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
        // fetch end offsets for each topicPartition
        kafkaConsumer.seekToEnd(assignments);
        for (TopicPartition topicPartition : assignments) {
            endOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
    } catch (Exception e) {
        throw UserException.dataReadError(e).message("Failed to fetch start/end offsets of the topic %s", topicName).addContext(e.getMessage()).build(logger);
    }
    // compute the work unit for each endpoint
    for (PartitionInfo partitionInfo : topicPartitions) {
        TopicPartition topicPartition = new TopicPartition(topicName, partitionInfo.partition());
        long lastCommittedOffset = startOffsetsMap.get(topicPartition);
        long latestOffset = endOffsetsMap.get(topicPartition);
        logger.debug("Latest offset of {} is {}", topicPartition, latestOffset);
        logger.debug("Last committed offset of {} is {}", topicPartition, lastCommittedOffset);
        PartitionScanWork work = new PartitionScanWork(topicPartition, lastCommittedOffset, latestOffset);
        Node[] inSyncReplicas = partitionInfo.inSyncReplicas();
        for (Node isr : inSyncReplicas) {
            String host = isr.host();
            DrillbitEndpoint ep = endpointMap.get(host);
            if (ep != null) {
                work.getByteMap().add(ep, work.getTotalBytes());
            }
        }
        partitionWorkList.add(work);
    }
}
Also used : Node(org.apache.kafka.common.Node) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) UserException(org.apache.drill.common.exceptions.UserException) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer)
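
Since Kafka 0.10.1 the consumer also exposes beginningOffsets(Collection) and endOffsets(Collection), which return the same information in bulk without the subscribe/poll/seek/position sequence and without mutating consumer state. A sketch of how the offset-gathering part of init() could look against that API, assuming kafkaConsumerProps and topicName stand in for the plugin configuration used above:

// Sketch: resolve the topic's partitions, then fetch start/end offsets in
// bulk via beginningOffsets/endOffsets (available since Kafka 0.10.1).
try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(
        kafkaConsumerProps, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
    List<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo info : consumer.partitionsFor(topicName)) {
        partitions.add(new TopicPartition(topicName, info.partition()));
    }
    Map<TopicPartition, Long> startOffsetsMap = consumer.beginningOffsets(partitions);
    Map<TopicPartition, Long> endOffsetsMap = consumer.endOffsets(partitions);
}

This variant needs no group membership, though it requires a newer client than the snippet above may have targeted.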

Example 34 with ByteArrayDeserializer

use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project drill by axbaretto.

the class KafkaQueriesTest method fetchOffsets.

private Map<TopicPartition, Long> fetchOffsets(int flag) {
    KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(storagePluginConfig.getKafkaConsumerProps(), new ByteArrayDeserializer(), new ByteArrayDeserializer());
    Map<TopicPartition, Long> offsetsMap = Maps.newHashMap();
    kafkaConsumer.subscribe(Collections.singletonList(TestQueryConstants.JSON_TOPIC));
    // per the KafkaConsumer JavaDoc, seekToBeginning/seekToEnd evaluate
    // lazily: the seek to the first/last offset in all partitions happens
    // only when poll(long) or position(TopicPartition) is called
    kafkaConsumer.poll(0);
    Set<TopicPartition> assignments = kafkaConsumer.assignment();
    try {
        if (flag == -2) {
            // fetch start offsets for each topicPartition
            kafkaConsumer.seekToBeginning(assignments);
            for (TopicPartition topicPartition : assignments) {
                offsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
            }
        } else if (flag == -1) {
            // fetch end offsets for each topicPartition
            kafkaConsumer.seekToEnd(assignments);
            for (TopicPartition topicPartition : assignments) {
                offsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
            }
        } else {
            throw new RuntimeException(String.format("Unsupported flag %d", flag));
        }
    } finally {
        kafkaConsumer.close();
    }
    return offsetsMap;
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer)
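
The -2/-1 magic numbers mirror Kafka's legacy earliest/latest sentinel offsets but push all validation to runtime. A sketch of the same method with an enum in place of the int flag and try-with-resources in place of the explicit finally block (KafkaConsumer implements Closeable); the enum is hypothetical, not part of the Drill test:

enum OffsetBound { EARLIEST, LATEST }

private Map<TopicPartition, Long> fetchOffsets(OffsetBound bound) {
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(
            storagePluginConfig.getKafkaConsumerProps(),
            new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        kafkaConsumer.subscribe(Collections.singletonList(TestQueryConstants.JSON_TOPIC));
        // poll once so the lazy partition assignment actually happens
        kafkaConsumer.poll(0);
        Set<TopicPartition> assignments = kafkaConsumer.assignment();
        if (bound == OffsetBound.EARLIEST) {
            kafkaConsumer.seekToBeginning(assignments);
        } else {
            kafkaConsumer.seekToEnd(assignments);
        }
        Map<TopicPartition, Long> offsetsMap = Maps.newHashMap();
        for (TopicPartition topicPartition : assignments) {
            offsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
        return offsetsMap;
    }
}

Invalid flags become unrepresentable, so the RuntimeException branch disappears.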

Example 35 with ByteArrayDeserializer

use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project apache-kafka-on-k8s by banzaicloud.

the class KafkaConsumerTest method testConstructorClose.

@Test
public void testConstructorClose() throws Exception {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try {
        new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
        Assert.fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka consumer", e.getMessage());
    }
}
Also used : MockMetricsReporter(org.apache.kafka.test.MockMetricsReporter) KafkaException(org.apache.kafka.common.KafkaException) Properties(java.util.Properties) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Test(org.junit.Test)
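
The static INIT_COUNT/CLOSE_COUNT assertions are the point of the test: they prove that the metrics reporter instantiated during the failed construction is also closed, i.e. the constructor releases its resources before rethrowing the wrapped KafkaException (here triggered by the unresolvable bootstrap address). On JUnit 4.13+ or JUnit 5 the same check reads more directly with assertThrows; a sketch, with a test name of my choosing:

@Test
public void testConstructorCloseWithAssertThrows() {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    KafkaException e = assertThrows(KafkaException.class,
            () -> new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()));
    assertEquals("Failed to construct kafka consumer", e.getMessage());
    assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
    assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
}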

Aggregations

ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer) 59
TopicPartition (org.apache.kafka.common.TopicPartition) 24
ArrayList (java.util.ArrayList) 22
Test (org.junit.Test) 22
Test (org.junit.jupiter.api.Test) 22
List (java.util.List) 17
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer) 17
HashMap (java.util.HashMap) 16
ByteBuffer (java.nio.ByteBuffer) 14
LinkedHashMap (java.util.LinkedHashMap) 14
MemoryRecords (org.apache.kafka.common.record.MemoryRecords) 14
SimpleRecord (org.apache.kafka.common.record.SimpleRecord) 14
HashSet (java.util.HashSet) 10
Properties (java.util.Properties) 10
Metrics (org.apache.kafka.common.metrics.Metrics) 10
Arrays.asList (java.util.Arrays.asList) 9
Collections.emptyList (java.util.Collections.emptyList) 9
Collections.singletonList (java.util.Collections.singletonList) 9
Map (java.util.Map) 9
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener) 7