
Example 56 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class KafkaConsumerTest, method testOsDefaultSocketBufferSizes.

@Test
public void testOsDefaultSocketBufferSizes() {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    consumer.close();
}
Also used: LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Test(org.junit.jupiter.api.Test)
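
For comparison, a minimal standalone sketch of the same pattern outside the test harness. The bootstrap address is a placeholder, and the literal -1 (the value behind Selectable.USE_DEFAULT_BUFFER_SIZE) is an assumption spelled out here: -1 tells Kafka to keep the operating system's default socket buffer sizes, and passing deserializer instances to the constructor means the deserializer class configs can be omitted.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OsDefaultBuffersSketch {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        // -1 leaves the OS default socket buffer sizes in place
        config.put(ConsumerConfig.SEND_BUFFER_CONFIG, -1);
        config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, -1);
        // Deserializer instances are passed directly, so no *_DESERIALIZER_CLASS_CONFIG is needed
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            // Construction and close only; no broker round trip is required for this
        }
    }
}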

Example 57 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class FetcherTest, method testListOffsetsSendsIsolationLevel.

private void testListOffsetsSendsIsolationLevel(IsolationLevel isolationLevel) {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, isolationLevel);
    assignFromUser(singleton(tp0));
    subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
    client.prepareResponse(body -> {
        ListOffsetsRequest request = (ListOffsetsRequest) body;
        return request.isolationLevel() == isolationLevel;
    }, listOffsetResponse(Errors.NONE, 1L, 5L));
    fetcher.resetOffsetsIfNeeded();
    consumerClient.pollNoWakeup();
    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertTrue(subscriptions.isFetchable(tp0));
    assertEquals(5, subscriptions.position(tp0).offset);
}
Also used: ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest)
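
The buildFetcher/assignFromUser harness above is test-internal, but the behavior it asserts is reachable from the public API. A minimal sketch, assuming a reachable broker at localhost:9092 and a topic named my-topic (both placeholders): a consumer configured with isolation.level=read_committed sends READ_COMMITTED in its ListOffsets requests, so endOffsets() resolves to the last stable offset rather than the high watermark.

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class IsolationLevelSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        // read_committed makes offset lookups resolve to the last stable offset (LSO)
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
            Map<TopicPartition, Long> end =
                consumer.endOffsets(Collections.singleton(tp), Duration.ofSeconds(5));
            System.out.println("End offset under read_committed: " + end.get(tp));
        }
    }
}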

Example 58 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class FetcherTest, method testEmptyControlBatch.

@Test
public void testEmptyControlBatch() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 1;
    // Empty control batch should not cause an exception
    DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.MAGIC_VALUE_V2, 1L, (short) 0, -1, 0, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH, TimestampType.CREATE_TIME, time.milliseconds(), true, true);
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
        return true;
    }, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
}
Also used: TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
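
The record classes used above (DefaultRecordBatch, MemoryRecords, SimpleRecord) live in org.apache.kafka.common.record, which is internal to the clients jar rather than public API. A minimal sketch reusing writeEmptyHeader with the same arguments as the test, purely to show what an empty control batch looks like when iterated: a header with the transactional and control flags set and zero records, which is exactly the shape the READ_COMMITTED fetch path must tolerate and skip.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.DefaultRecordBatch;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class EmptyControlBatchSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(256);
        // Header-only batch: isTransactional = true, isControlBatch = true, no records
        DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.MAGIC_VALUE_V2,
                1L, (short) 0, -1, 0L, 0L, RecordBatch.NO_PARTITION_LEADER_EPOCH,
                TimestampType.CREATE_TIME, System.currentTimeMillis(), true, true);
        buffer.flip();
        MemoryRecords records = MemoryRecords.readableRecords(buffer);
        for (RecordBatch batch : records.batches()) {
            // countOrNull() is 0 here; a READ_COMMITTED fetcher drops such batches silently
            System.out.println("control batch: " + batch.isControlBatch()
                    + ", records: " + batch.countOrNull());
        }
    }
}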

Example 59 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project drill by apache.

From class KafkaGroupScan, method init.

/**
 * Computes the work per topic partition, based on the start and end offsets
 * of each corresponding TopicPartition.
 */
private void init() {
    partitionWorkMap = Maps.newHashMap();
    Collection<DrillbitEndpoint> endpoints = kafkaStoragePlugin.getContext().getBits();
    Map<String, DrillbitEndpoint> endpointMap = endpoints.stream().collect(Collectors.toMap(DrillbitEndpoint::getAddress, Function.identity(), (o, n) -> n));
    Map<TopicPartition, Long> startOffsetsMap = Maps.newHashMap();
    Map<TopicPartition, Long> endOffsetsMap = Maps.newHashMap();
    List<PartitionInfo> topicPartitions;
    String topicName = kafkaScanSpec.getTopicName();
    KafkaConsumer<?, ?> kafkaConsumer = null;
    try {
        kafkaConsumer = new KafkaConsumer<>(kafkaStoragePlugin.getConfig().getKafkaConsumerProps(), new ByteArrayDeserializer(), new ByteArrayDeserializer());
        if (!kafkaConsumer.listTopics().containsKey(topicName)) {
            throw UserException.dataReadError().message("Table '%s' does not exist", topicName).build(logger);
        }
        kafkaConsumer.subscribe(Collections.singletonList(topicName));
        // Per the KafkaConsumer Javadoc, seekToBeginning/seekToEnd evaluate lazily:
        // the consumer seeks to the first/last offset in all partitions only when
        // poll(Duration) or position(TopicPartition) is called
        kafkaConsumer.poll(Duration.ofSeconds(5));
        Set<TopicPartition> assignments = waitForConsumerAssignment(kafkaConsumer);
        topicPartitions = kafkaConsumer.partitionsFor(topicName);
        // fetch start offsets for each topicPartition
        kafkaConsumer.seekToBeginning(assignments);
        for (TopicPartition topicPartition : assignments) {
            startOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
        // fetch end offsets for each topicPartition
        kafkaConsumer.seekToEnd(assignments);
        for (TopicPartition topicPartition : assignments) {
            endOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
    } catch (Exception e) {
        throw UserException.dataReadError(e).message("Failed to fetch start/end offsets of the topic %s", topicName).addContext(e.getMessage()).build(logger);
    } finally {
        kafkaStoragePlugin.registerToClose(kafkaConsumer);
    }
    // computes work for each end point
    for (PartitionInfo partitionInfo : topicPartitions) {
        TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
        long lastCommittedOffset = startOffsetsMap.get(topicPartition);
        long latestOffset = endOffsetsMap.get(topicPartition);
        logger.debug("Latest offset of {} is {}", topicPartition, latestOffset);
        logger.debug("Last committed offset of {} is {}", topicPartition, lastCommittedOffset);
        KafkaPartitionScanSpec partitionScanSpec = new KafkaPartitionScanSpec(topicPartition.topic(), topicPartition.partition(), lastCommittedOffset, latestOffset);
        PartitionScanWork work = new PartitionScanWork(new EndpointByteMapImpl(), partitionScanSpec);
        Node[] inSyncReplicas = partitionInfo.inSyncReplicas();
        for (Node isr : inSyncReplicas) {
            String host = isr.host();
            DrillbitEndpoint ep = endpointMap.get(host);
            if (ep != null) {
                work.getByteMap().add(ep, work.getTotalBytes());
            }
        }
        partitionWorkMap.put(topicPartition, work);
    }
}
Also used: StoragePluginRegistry(org.apache.drill.exec.store.StoragePluginRegistry) JsonProperty(com.fasterxml.jackson.annotation.JsonProperty) DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) UserException(org.apache.drill.common.exceptions.UserException) LoggerFactory(org.slf4j.LoggerFactory) Sets(org.apache.drill.shaded.guava.com.google.common.collect.Sets) AbstractGroupScan(org.apache.drill.exec.physical.base.AbstractGroupScan) Function(java.util.function.Function) StringUtils(org.apache.commons.lang3.StringUtils) HashSet(java.util.HashSet) GroupScanProperty(org.apache.drill.exec.physical.base.ScanStats.GroupScanProperty) Maps(org.apache.drill.shaded.guava.com.google.common.collect.Maps) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) JsonTypeName(com.fasterxml.jackson.annotation.JsonTypeName) AffinityCreator(org.apache.drill.exec.store.schedule.AffinityCreator) ListMultimap(org.apache.drill.shaded.guava.com.google.common.collect.ListMultimap) EndpointByteMapImpl(org.apache.drill.exec.store.schedule.EndpointByteMapImpl) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) Duration(java.time.Duration) Map(java.util.Map) JsonIgnore(com.fasterxml.jackson.annotation.JsonIgnore) EndpointByteMap(org.apache.drill.exec.store.schedule.EndpointByteMap) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) JacksonInject(com.fasterxml.jackson.annotation.JacksonInject) Logger(org.slf4j.Logger) ScanStats(org.apache.drill.exec.physical.base.ScanStats) Collection(java.util.Collection) SchemaPath(org.apache.drill.common.expression.SchemaPath) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) Collectors(java.util.stream.Collectors) CompleteWork(org.apache.drill.exec.store.schedule.CompleteWork) List(java.util.List) Lists(org.apache.drill.shaded.guava.com.google.common.collect.Lists) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) EndpointAffinity(org.apache.drill.exec.physical.EndpointAffinity) GroupScan(org.apache.drill.exec.physical.base.GroupScan) Preconditions(org.apache.drill.shaded.guava.com.google.common.base.Preconditions) Node(org.apache.kafka.common.Node) ExecConstants(org.apache.drill.exec.ExecConstants) Collections(java.util.Collections) AssignmentCreator(org.apache.drill.exec.store.schedule.AssignmentCreator) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
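
The subscribe/poll/seek sequence above exists to obtain a consumer assignment before calling position(). The same start and end offsets can be read more directly with the bulk beginningOffsets()/endOffsets() APIs, which require no subscription at all. A minimal sketch under assumed connection details (localhost:9092 and topic my-topic are placeholders):

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class TopicOffsetsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            List<TopicPartition> partitions = consumer.partitionsFor("my-topic", Duration.ofSeconds(5))
                    .stream()
                    .map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toList());
            // Bulk lookups: no subscribe(), poll(), or seek dance required
            Map<TopicPartition, Long> start = consumer.beginningOffsets(partitions, Duration.ofSeconds(5));
            Map<TopicPartition, Long> end = consumer.endOffsets(partitions, Duration.ofSeconds(5));
            for (TopicPartition tp : partitions) {
                System.out.printf("%s: start=%d, end=%d%n", tp, start.get(tp), end.get(tp));
            }
        }
    }
}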

Aggregations

ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer) 59
TopicPartition (org.apache.kafka.common.TopicPartition) 24
ArrayList (java.util.ArrayList) 22
Test (org.junit.Test) 22
Test (org.junit.jupiter.api.Test) 22
List (java.util.List) 17
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer) 17
HashMap (java.util.HashMap) 16
ByteBuffer (java.nio.ByteBuffer) 14
LinkedHashMap (java.util.LinkedHashMap) 14
MemoryRecords (org.apache.kafka.common.record.MemoryRecords) 14
SimpleRecord (org.apache.kafka.common.record.SimpleRecord) 14
HashSet (java.util.HashSet) 10
Properties (java.util.Properties) 10
Metrics (org.apache.kafka.common.metrics.Metrics) 10
Arrays.asList (java.util.Arrays.asList) 9
Collections.emptyList (java.util.Collections.emptyList) 9
Collections.singletonList (java.util.Collections.singletonList) 9
Map (java.util.Map) 9
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener) 7