
Example 26 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class MirrorClient, method remoteConsumerOffsets.

/**
 * Translate a remote consumer group's offsets into corresponding local offsets. Topics are automatically
 * renamed according to the ReplicationPolicy.
 * @param consumerGroupId group ID of the remote consumer group
 * @param remoteClusterAlias alias of the remote cluster
 * @param timeout maximum time to block while consuming the checkpoint topic
 * @return the translated offsets, keyed by local topic-partition
 */
public Map<TopicPartition, OffsetAndMetadata> remoteConsumerOffsets(String consumerGroupId, String remoteClusterAlias, Duration timeout) {
    long deadline = System.currentTimeMillis() + timeout.toMillis();
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        // checkpoint topics are not "remote topics", as they are not replicated. So we don't need
        // to use ReplicationPolicy to create the checkpoint topic here.
        String checkpointTopic = replicationPolicy.checkpointsTopic(remoteClusterAlias);
        List<TopicPartition> checkpointAssignment = Collections.singletonList(new TopicPartition(checkpointTopic, 0));
        consumer.assign(checkpointAssignment);
        consumer.seekToBeginning(checkpointAssignment);
        while (System.currentTimeMillis() < deadline && !endOfStream(consumer, checkpointAssignment)) {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(timeout);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                try {
                    Checkpoint checkpoint = Checkpoint.deserializeRecord(record);
                    if (checkpoint.consumerGroupId().equals(consumerGroupId)) {
                        offsets.put(checkpoint.topicPartition(), checkpoint.offsetAndMetadata());
                    }
                } catch (SchemaException e) {
                    log.info("Could not deserialize record. Skipping.", e);
                }
            }
        }
        log.info("Consumed {} checkpoint records for {} from {}.", offsets.size(), consumerGroupId, checkpointTopic);
    }
    return offsets;
}
Also used: SchemaException(org.apache.kafka.common.protocol.types.SchemaException) HashMap(java.util.HashMap) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer)
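
A usage sketch may help here. The following is a minimal, hypothetical example of calling remoteConsumerOffsets and then seeking a local consumer to the translated positions; mirrorClient, consumerProps, and the group and cluster names are assumptions, not part of the excerpt above.

static void seekToTranslatedOffsets(final MirrorClient mirrorClient, final Properties consumerProps,
                                    final String groupId, final String remoteClusterAlias) {
    // Translate the remote group's committed offsets into local topic-partitions.
    final Map<TopicPartition, OffsetAndMetadata> translated =
        mirrorClient.remoteConsumerOffsets(groupId, remoteClusterAlias, Duration.ofSeconds(30));
    try (KafkaConsumer<byte[], byte[]> consumer =
             new KafkaConsumer<>(consumerProps, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        consumer.assign(translated.keySet());
        // Position the consumer at each translated offset before it resumes polling.
        translated.forEach((tp, om) -> consumer.seek(tp, om.offset()));
    }
}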

Example 27 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class RegexSourceIntegrationTest, method testRegexMatchesTopicsAWhenDeleted.

@Test
public void testRegexMatchesTopicsAWhenDeleted() throws Exception {
    final Serde<String> stringSerde = Serdes.String();
    final List<String> expectedFirstAssignment = Arrays.asList("TEST-TOPIC-A", "TEST-TOPIC-B");
    final List<String> expectedSecondAssignment = Collections.singletonList("TEST-TOPIC-B");
    final List<String> assignedTopics = new CopyOnWriteArrayList<>();
    try {
        CLUSTER.createTopics("TEST-TOPIC-A", "TEST-TOPIC-B");
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-[A-Z]"));
        pattern1Stream.to(outputTopic, Produced.with(stringSerde, stringSerde));
        streams = new KafkaStreams(builder.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {

            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {

                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(assignedTopics, listener));
                    }
                };
            }
        });
        streams.start();
        TestUtils.waitForCondition(() -> assignedTopics.equals(expectedFirstAssignment), STREAM_TASKS_NOT_UPDATED);
    } finally {
        CLUSTER.deleteTopic("TEST-TOPIC-A");
    }
    TestUtils.waitForCondition(() -> assignedTopics.equals(expectedSecondAssignment), STREAM_TASKS_NOT_UPDATED);
}
Also used: Pattern(java.util.regex.Pattern) KafkaStreams(org.apache.kafka.streams.KafkaStreams) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Map(java.util.Map) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
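
TheConsumerRebalanceListener used above is defined elsewhere in the test class and is not part of this excerpt. A minimal sketch of a delegating listener consistent with this usage (field and parameter names are assumptions, and the imports are those already listed):

private static class TheConsumerRebalanceListener implements ConsumerRebalanceListener {

    private final List<String> assignedTopics;
    private final ConsumerRebalanceListener delegate;

    TheConsumerRebalanceListener(final List<String> assignedTopics, final ConsumerRebalanceListener delegate) {
        this.assignedTopics = assignedTopics;
        this.delegate = delegate;
    }

    @Override
    public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
        // Drop revoked topics from the shared list, then let Streams' own listener run.
        for (final TopicPartition partition : partitions) {
            assignedTopics.remove(partition.topic());
        }
        delegate.onPartitionsRevoked(partitions);
    }

    @Override
    public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
        // Record each newly assigned topic once, preserving assignment order for the test's comparisons.
        for (final TopicPartition partition : partitions) {
            if (!assignedTopics.contains(partition.topic())) {
                assignedTopics.add(partition.topic());
            }
        }
        delegate.onPartitionsAssigned(partitions);
    }
}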

Example 28 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class RegexSourceIntegrationTest, method testRegexMatchesTopicsAWhenCreated.

@Test
public void testRegexMatchesTopicsAWhenCreated() throws Exception {
    try {
        final Serde<String> stringSerde = Serdes.String();
        final List<String> expectedFirstAssignment = Collections.singletonList("TEST-TOPIC-1");
        // We compare lists of subscribed topics, and hence require the order as well; this is guaranteed
        // with KIP-429 since we would NOT revoke TEST-TOPIC-1 but only add TEST-TOPIC-2, so the list is always
        // in the order "TEST-TOPIC-1, TEST-TOPIC-2". Note that if the KIP-429 behavior ever changes, this may become a flaky test.
        final List<String> expectedSecondAssignment = Arrays.asList("TEST-TOPIC-1", "TEST-TOPIC-2");
        CLUSTER.createTopic("TEST-TOPIC-1");
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-\\d"));
        pattern1Stream.to(outputTopic, Produced.with(stringSerde, stringSerde));
        final List<String> assignedTopics = new CopyOnWriteArrayList<>();
        streams = new KafkaStreams(builder.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {

            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {

                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(assignedTopics, listener));
                    }
                };
            }
        });
        streams.start();
        TestUtils.waitForCondition(() -> assignedTopics.equals(expectedFirstAssignment), STREAM_TASKS_NOT_UPDATED);
        CLUSTER.createTopic("TEST-TOPIC-2");
        TestUtils.waitForCondition(() -> assignedTopics.equals(expectedSecondAssignment), STREAM_TASKS_NOT_UPDATED);
        streams.close();
    } finally {
        CLUSTER.deleteTopicsAndWait("TEST-TOPIC-1", "TEST-TOPIC-2");
    }
}
Also used: Pattern(java.util.regex.Pattern) KafkaStreams(org.apache.kafka.streams.KafkaStreams) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Map(java.util.Map) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
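
The same regex subscription works with a plain consumer outside Kafka Streams. A minimal standalone sketch, assuming the usual client imports (the broker address, group id, and metadata.max.age.ms value are assumptions); a short metadata refresh interval is what lets the consumer notice newly created matching topics quickly:

final Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
props.put(ConsumerConfig.GROUP_ID_CONFIG, "regex-demo");              // assumed group id
// Refresh metadata every second so topics created after subscribing are matched quickly.
props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
try (KafkaConsumer<byte[], byte[]> consumer =
         new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
    consumer.subscribe(Pattern.compile("TEST-TOPIC-\\d"));
    final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
    System.out.println("Fetched " + records.count() + " records");
}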

Example 29 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class StreamsResetter, method maybeResetInputAndSeekToEndIntermediateTopicOffsets.

private int maybeResetInputAndSeekToEndIntermediateTopicOffsets(final Map<Object, Object> consumerConfig, final boolean dryRun) throws IOException, ParseException {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    int topicNotFound = EXIT_CODE_SUCCESS;
    final List<String> notFoundInputTopics = new ArrayList<>();
    final List<String> notFoundIntermediateTopics = new ArrayList<>();
    final String groupId = options.valueOf(applicationIdOption);
    if (inputTopics.size() == 0 && intermediateTopics.size() == 0) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return EXIT_CODE_SUCCESS;
    }
    if (inputTopics.size() != 0) {
        System.out.println("Reset-offsets for input topics " + inputTopics);
    }
    if (intermediateTopics.size() != 0) {
        System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
    }
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            notFoundInputTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            notFoundIntermediateTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    if (!notFoundInputTopics.isEmpty()) {
        System.out.println("The following input topics were not found, skipping them:");
        for (final String topic : notFoundInputTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    if (!notFoundIntermediateTopics.isEmpty()) {
        System.out.println("The following intermediate topics were not found, skipping them:");
        for (final String topic : notFoundIntermediateTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    // Return early if there are no topics to subscribe to (the consumer would raise an error if we
    // try to poll with an empty subscription)
    if (topicsToSubscribe.isEmpty()) {
        return topicNotFound;
    }
    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        final Collection<TopicPartition> partitions = topicsToSubscribe.stream().map(client::partitionsFor).flatMap(Collection::stream).map(info -> new TopicPartition(info.topic(), info.partition())).collect(Collectors.toList());
        client.assign(partitions);
        final Set<TopicPartition> inputTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic)) {
                inputTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }
        maybeReset(groupId, client, inputTopicPartitions);
        maybeSeekToEnd(groupId, client, intermediateTopicPartitions);
        if (!dryRun) {
            for (final TopicPartition p : partitions) {
                client.position(p);
            }
            client.commitSync();
        }
    } catch (final IOException | ParseException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
    return topicNotFound;
}
Also used: Exit(org.apache.kafka.common.utils.Exit) HashMap(java.util.HashMap) DescribeConsumerGroupsOptions(org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) OptionException(joptsimple.OptionException) InterfaceStability(org.apache.kafka.common.annotation.InterfaceStability) CommandLineUtils(kafka.utils.CommandLineUtils) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) Duration(java.time.Duration) Map(java.util.Map) OptionParser(joptsimple.OptionParser) Admin(org.apache.kafka.clients.admin.Admin) DeleteTopicsResult(org.apache.kafka.clients.admin.DeleteTopicsResult) ParseException(java.text.ParseException) LinkedList(java.util.LinkedList) OptionSet(joptsimple.OptionSet) OptionSpec(joptsimple.OptionSpec) Consumer(org.apache.kafka.clients.consumer.Consumer) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) DescribeConsumerGroupsResult(org.apache.kafka.clients.admin.DescribeConsumerGroupsResult) Properties(java.util.Properties) Collection(java.util.Collection) RemoveMembersFromConsumerGroupOptions(org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) IOException(java.io.IOException) KafkaFuture(org.apache.kafka.common.KafkaFuture) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) JavaConverters(scala.collection.JavaConverters) Optional(java.util.Optional) MemberDescription(org.apache.kafka.clients.admin.MemberDescription) OptionSpecBuilder(joptsimple.OptionSpecBuilder) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
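
The helpers maybeReset and maybeSeekToEnd are not shown in this excerpt. As a rough sketch of the seek-to-end half, under the same types used above and not necessarily matching the project's exact implementation:

private void maybeSeekToEnd(final String groupId,
                            final Consumer<byte[], byte[]> client,
                            final Set<TopicPartition> intermediateTopicPartitions) {
    if (!intermediateTopicPartitions.isEmpty()) {
        System.out.println("Seek-to-end for intermediate topic partitions of group " + groupId);
        // Move the consumer's position to the log end for each partition; the later
        // commitSync() then records these positions for the application's group id.
        client.seekToEnd(intermediateTopicPartitions);
    }
}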

Example 30 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

From class TopologyTestDriverTest, method shouldPassRecordHeadersIntoSerializersAndDeserializers.

@Test
public void shouldPassRecordHeadersIntoSerializersAndDeserializers() {
    testDriver = new TopologyTestDriver(setupSourceSinkTopology(), config);
    final AtomicBoolean passedHeadersToKeySerializer = new AtomicBoolean(false);
    final AtomicBoolean passedHeadersToValueSerializer = new AtomicBoolean(false);
    final AtomicBoolean passedHeadersToKeyDeserializer = new AtomicBoolean(false);
    final AtomicBoolean passedHeadersToValueDeserializer = new AtomicBoolean(false);
    final Serializer<byte[]> keySerializer = new ByteArraySerializer() {

        @Override
        public byte[] serialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToKeySerializer.set(true);
            return serialize(topic, data);
        }
    };
    final Serializer<byte[]> valueSerializer = new ByteArraySerializer() {

        @Override
        public byte[] serialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToValueSerializer.set(true);
            return serialize(topic, data);
        }
    };
    final Deserializer<byte[]> keyDeserializer = new ByteArrayDeserializer() {

        @Override
        public byte[] deserialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToKeyDeserializer.set(true);
            return deserialize(topic, data);
        }
    };
    final Deserializer<byte[]> valueDeserializer = new ByteArrayDeserializer() {

        @Override
        public byte[] deserialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToValueDeserializer.set(true);
            return deserialize(topic, data);
        }
    };
    final TestInputTopic<byte[], byte[]> inputTopic = testDriver.createInputTopic(SOURCE_TOPIC_1, keySerializer, valueSerializer);
    final TestOutputTopic<byte[], byte[]> outputTopic = testDriver.createOutputTopic(SINK_TOPIC_1, keyDeserializer, valueDeserializer);
    inputTopic.pipeInput(testRecord1);
    outputTopic.readRecord();
    assertThat(passedHeadersToKeySerializer.get(), equalTo(true));
    assertThat(passedHeadersToValueSerializer.get(), equalTo(true));
    assertThat(passedHeadersToKeyDeserializer.get(), equalTo(true));
    assertThat(passedHeadersToValueDeserializer.get(), equalTo(true));
}
Also used: AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Headers(org.apache.kafka.common.header.Headers) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) Test(org.junit.jupiter.api.Test)
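
The fixture testRecord1 is defined elsewhere in the test class. As a hypothetical sketch of how such a record could be built with headers (the key, value, and header contents here are assumptions), using TestRecord from org.apache.kafka.streams.test:

// Build a headered record so the header-aware serializer/deserializer overloads see real data.
final Headers headers = new RecordHeaders()
    .add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8)); // assumed header key and value
final TestRecord<byte[], byte[]> testRecord1 = new TestRecord<>(
    "key".getBytes(StandardCharsets.UTF_8),
    "value".getBytes(StandardCharsets.UTF_8),
    headers);
inputTopic.pipeInput(testRecord1);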

Aggregations

ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 59 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 24 usages
ArrayList (java.util.ArrayList): 22 usages
Test (org.junit.Test): 22 usages
Test (org.junit.jupiter.api.Test): 22 usages
List (java.util.List): 17 usages
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 17 usages
HashMap (java.util.HashMap): 16 usages
ByteBuffer (java.nio.ByteBuffer): 14 usages
LinkedHashMap (java.util.LinkedHashMap): 14 usages
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 14 usages
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 14 usages
HashSet (java.util.HashSet): 10 usages
Properties (java.util.Properties): 10 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 10 usages
Arrays.asList (java.util.Arrays.asList): 9 usages
Collections.emptyList (java.util.Collections.emptyList): 9 usages
Collections.singletonList (java.util.Collections.singletonList): 9 usages
Map (java.util.Map): 9 usages
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener): 7 usages