Search in sources:

Example 6 with ConsumerConfig

This example shows a use of org.apache.kafka.clients.consumer.ConsumerConfig in the Apache Kafka project.

Source: class NamedTopologyIntegrationTest, method shouldAddNamedTopologyToRunningApplicationWithMultipleNodes.

// Verifies that a second named topology can be added while two KafkaStreams
// instances are already running, and that output records are produced for both
// the original topology and the newly added one.
@Test
public void shouldAddNamedTopologyToRunningApplicationWithMultipleNodes() throws Exception {
    setupSecondKafkaStreams();

    // Topology 1 (counting INPUT_STREAM_1) wired on both instances.
    topology1Builder.stream(INPUT_STREAM_1)
        .groupBy((key, value) -> key)
        .count(IN_MEMORY_STORE)
        .toStream()
        .to(OUTPUT_STREAM_1);
    topology1Builder2.stream(INPUT_STREAM_1)
        .groupBy((key, value) -> key)
        .count(IN_MEMORY_STORE)
        .toStream()
        .to(OUTPUT_STREAM_1);

    // Topology 2 (counting INPUT_STREAM_2) wired on both instances; added after startup.
    topology2Builder.stream(INPUT_STREAM_2)
        .groupBy((key, value) -> key)
        .count(IN_MEMORY_STORE)
        .toStream()
        .to(OUTPUT_STREAM_2);
    topology2Builder2.stream(INPUT_STREAM_2)
        .groupBy((key, value) -> key)
        .count(IN_MEMORY_STORE)
        .toStream()
        .to(OUTPUT_STREAM_2);

    streams.start(topology1Builder.build());
    streams2.start(topology1Builder2.build());
    waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));

    // Add topology 2 to both running instances and block until both adds complete.
    final AddNamedTopologyResult firstAdd = streams.addNamedTopology(topology2Builder.build());
    final AddNamedTopologyResult secondAdd = streams2.addNamedTopology(topology2Builder2.build());
    firstAdd.all().get();
    secondAdd.all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_2, 3), equalTo(COUNT_OUTPUT_DATA));
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) Stores(org.apache.kafka.streams.state.Stores) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) Collections.singletonList(java.util.Collections.singletonList) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClientUtils.extractThreadId(org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId) MissingSourceTopicException(org.apache.kafka.streams.errors.MissingSourceTopicException) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaClientSupplier(org.apache.kafka.streams.KafkaClientSupplier) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) 
State(org.apache.kafka.streams.KafkaStreams.State) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Materialized(org.apache.kafka.streams.kstream.Materialized) Optional(java.util.Optional) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Queue(java.util.Queue) Pattern(java.util.regex.Pattern) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) CoreMatchers.not(org.hamcrest.CoreMatchers.not) NamedTopologyStoreQueryParameters(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) TestUtils.retryOnExceptionWithTimeout(org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout) KeyValue.pair(org.apache.kafka.streams.KeyValue.pair) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) Before(org.junit.Before) IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived) KTable(org.apache.kafka.streams.kstream.KTable) IntegrationTestUtils.waitForApplicationState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState) Properties(java.util.Properties) 
StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Consumed(org.apache.kafka.streams.kstream.Consumed) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Test(org.junit.Test) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) NOT_AVAILABLE(org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE) Rule(org.junit.Rule) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) LagInfo(org.apache.kafka.streams.LagInfo) UniqueTopicSerdeScope(org.apache.kafka.streams.utils.UniqueTopicSerdeScope) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Test(org.junit.Test)

Example 7 with ConsumerConfig

This example shows a use of org.apache.kafka.clients.consumer.ConsumerConfig in the Apache Kafka project.

Source: class NamedTopologyIntegrationTest, method shouldRemoveNamedTopologyToRunningApplicationWithMultipleNodesAndResetsOffsets.

/**
 * Verifies that a named topology can be removed — with its committed offsets reset —
 * from a running two-instance application, its local and changelog state cleaned up,
 * and a new topology reading the same input topic added afterwards, which must then
 * re-process the input from the beginning and produce the full counts again.
 */
@Test
public void shouldRemoveNamedTopologyToRunningApplicationWithMultipleNodesAndResetsOffsets() throws Exception {
    setupSecondKafkaStreams();
    topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    streams.start(topology1Builder.build());
    streams2.start(topology1Builder2.build());
    waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
    // Remove topology 1 from both instances, resetting its committed offsets (reset = true).
    final RemoveNamedTopologyResult result = streams.removeNamedTopology(TOPOLOGY_1, true);
    streams2.removeNamedTopology(TOPOLOGY_1, true).all().get();
    result.all().get();
    assertThat(streams.getTopologyByName(TOPOLOGY_1), equalTo(Optional.empty()));
    assertThat(streams2.getTopologyByName(TOPOLOGY_1), equalTo(Optional.empty()));
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    streams2.cleanUpNamedTopology(TOPOLOGY_1);
    // Best-effort deletion of the changelog topics so the re-added topology starts clean.
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            // Fixed: restore the interrupt status instead of silently swallowing it,
            // so the surrounding framework can still observe the interruption.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    });
    // Re-add a topology over the same input topic; with offsets reset it must
    // re-consume INPUT_STREAM_1 from the beginning.
    topology2Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    topology2Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    final AddNamedTopologyResult result1 = streams.addNamedTopology(topology2Builder.build());
    streams2.addNamedTopology(topology2Builder2.build()).all().get();
    result1.all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_2, 3), equalTo(COUNT_OUTPUT_DATA));
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) Stores(org.apache.kafka.streams.state.Stores) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) Collections.singletonList(java.util.Collections.singletonList) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClientUtils.extractThreadId(org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId) MissingSourceTopicException(org.apache.kafka.streams.errors.MissingSourceTopicException) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaClientSupplier(org.apache.kafka.streams.KafkaClientSupplier) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) 
State(org.apache.kafka.streams.KafkaStreams.State) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Materialized(org.apache.kafka.streams.kstream.Materialized) Optional(java.util.Optional) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Queue(java.util.Queue) Pattern(java.util.regex.Pattern) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) CoreMatchers.not(org.hamcrest.CoreMatchers.not) NamedTopologyStoreQueryParameters(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) TestUtils.retryOnExceptionWithTimeout(org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout) KeyValue.pair(org.apache.kafka.streams.KeyValue.pair) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) Before(org.junit.Before) IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived) KTable(org.apache.kafka.streams.kstream.KTable) IntegrationTestUtils.waitForApplicationState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState) Properties(java.util.Properties) 
StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Consumed(org.apache.kafka.streams.kstream.Consumed) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Test(org.junit.Test) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) NOT_AVAILABLE(org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE) Rule(org.junit.Rule) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) LagInfo(org.apache.kafka.streams.LagInfo) UniqueTopicSerdeScope(org.apache.kafka.streams.utils.UniqueTopicSerdeScope) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) Test(org.junit.Test)

Example 8 with ConsumerConfig

This example shows a use of org.apache.kafka.clients.consumer.ConsumerConfig in the Apache Kafka project.

Source: class NamedTopologyIntegrationTest, method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning.

/**
 * Verifies the full add/remove/re-add cycle for a named topology containing two
 * stateful subtopologies (a count and a sum, both repartitioning via map + groupByKey):
 * the topology is added to an initially empty application, removed with its offsets
 * reset and its changelog/repartition topics deleted, and then an identical topology
 * is added again — which must re-process the input and reproduce the same results.
 */
@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up named topology with two stateful subtopologies
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    // Remove the topology, resetting its committed offsets, and wipe its local state.
    streams.removeNamedTopology(TOPOLOGY_1, true).all().get();
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    // Best-effort deletion of internal changelog and repartition topics.
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog") || t.contains("-repartition")).forEach(t -> {
        try {
            CLUSTER.deleteTopicsAndWait(t);
        } catch (final InterruptedException e) {
            // Fixed: restore the interrupt status instead of silently swallowing it,
            // so the surrounding framework can still observe the interruption.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    });
    // Rebuild the identical topology from a duplicate builder and add it again.
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) Stores(org.apache.kafka.streams.state.Stores) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) Collections.singletonList(java.util.Collections.singletonList) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClientUtils.extractThreadId(org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId) MissingSourceTopicException(org.apache.kafka.streams.errors.MissingSourceTopicException) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaClientSupplier(org.apache.kafka.streams.KafkaClientSupplier) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) 
State(org.apache.kafka.streams.KafkaStreams.State) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Materialized(org.apache.kafka.streams.kstream.Materialized) Optional(java.util.Optional) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Queue(java.util.Queue) Pattern(java.util.regex.Pattern) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) CoreMatchers.not(org.hamcrest.CoreMatchers.not) NamedTopologyStoreQueryParameters(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) TestUtils.retryOnExceptionWithTimeout(org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout) KeyValue.pair(org.apache.kafka.streams.KeyValue.pair) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) Before(org.junit.Before) IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived) KTable(org.apache.kafka.streams.kstream.KTable) IntegrationTestUtils.waitForApplicationState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState) Properties(java.util.Properties) 
StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Consumed(org.apache.kafka.streams.kstream.Consumed) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Test(org.junit.Test) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) NOT_AVAILABLE(org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE) Rule(org.junit.Rule) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) LagInfo(org.apache.kafka.streams.LagInfo) UniqueTopicSerdeScope(org.apache.kafka.streams.utils.UniqueTopicSerdeScope) KeyValue(org.apache.kafka.streams.KeyValue) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) Test(org.junit.Test)

Example 9 with ConsumerConfig

This example shows a use of org.apache.kafka.clients.consumer.ConsumerConfig in the Apache Kafka project.

Source: class StreamsResetter, method maybeResetInputAndSeekToEndIntermediateTopicOffsets.

/**
 * Resets committed offsets for the configured input topics and seeks to the end of
 * the configured intermediate topics, then (unless this is a dry run) fetches the
 * resulting positions and commits them under the application's group id.
 *
 * @param consumerConfig base consumer configuration used for the internal client
 * @param dryRun         if {@code true}, compute the new positions but do not commit them
 * @return {@code EXIT_CODE_SUCCESS} when all requested topics were found, otherwise
 *         {@code EXIT_CODE_ERROR}; missing topics are reported and skipped, not fatal
 * @throws IOException    if resetting the offsets fails with an I/O error
 * @throws ParseException if a user-supplied reset option cannot be parsed
 */
private int maybeResetInputAndSeekToEndIntermediateTopicOffsets(final Map<Object, Object> consumerConfig, final boolean dryRun) throws IOException, ParseException {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    int topicNotFound = EXIT_CODE_SUCCESS;
    final List<String> notFoundInputTopics = new ArrayList<>();
    final List<String> notFoundIntermediateTopics = new ArrayList<>();
    final String groupId = options.valueOf(applicationIdOption);
    if (inputTopics.isEmpty() && intermediateTopics.isEmpty()) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return EXIT_CODE_SUCCESS;
    }
    if (!inputTopics.isEmpty()) {
        System.out.println("Reset-offsets for input topics " + inputTopics);
    }
    if (!intermediateTopics.isEmpty()) {
        System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
    }
    // Partition the requested topics into those that exist in the cluster and
    // those that must be reported as missing.
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            notFoundInputTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            notFoundIntermediateTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    if (!notFoundInputTopics.isEmpty()) {
        System.out.println("Following input topics are not found, skipping them");
        for (final String topic : notFoundInputTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    if (!notFoundIntermediateTopics.isEmpty()) {
        System.out.println("Following intermediate topics are not found, skipping them");
        for (final String topic : notFoundIntermediateTopics) {
            // Fixed: was "Topic:" without a space, inconsistent with the
            // input-topic report above.
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    // Return early if nothing is left to subscribe to (otherwise the consumer
    // would try to poll with an empty subscription).
    if (topicsToSubscribe.isEmpty()) {
        return topicNotFound;
    }
    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    // Offsets are committed explicitly below (when not a dry run), never automatically.
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        final Collection<TopicPartition> partitions = topicsToSubscribe.stream().map(client::partitionsFor).flatMap(Collection::stream).map(info -> new TopicPartition(info.topic(), info.partition())).collect(Collectors.toList());
        client.assign(partitions);
        // Split the assigned partitions by role: input topics get their offsets
        // reset, intermediate topics are seeked to the end.
        final Set<TopicPartition> inputTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic)) {
                inputTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }
        maybeReset(groupId, client, inputTopicPartitions);
        maybeSeekToEnd(groupId, client, intermediateTopicPartitions);
        if (!dryRun) {
            // position() forces the lazily-evaluated seeks to resolve before committing.
            for (final TopicPartition p : partitions) {
                client.position(p);
            }
            client.commitSync();
        }
    } catch (final IOException | ParseException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
    return topicNotFound;
}
Also used : Exit(org.apache.kafka.common.utils.Exit) HashMap(java.util.HashMap) DescribeConsumerGroupsOptions(org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) OptionException(joptsimple.OptionException) InterfaceStability(org.apache.kafka.common.annotation.InterfaceStability) CommandLineUtils(kafka.utils.CommandLineUtils) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) Duration(java.time.Duration) Map(java.util.Map) OptionParser(joptsimple.OptionParser) Admin(org.apache.kafka.clients.admin.Admin) DeleteTopicsResult(org.apache.kafka.clients.admin.DeleteTopicsResult) ParseException(java.text.ParseException) LinkedList(java.util.LinkedList) OptionSet(joptsimple.OptionSet) OptionSpec(joptsimple.OptionSpec) Consumer(org.apache.kafka.clients.consumer.Consumer) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) DescribeConsumerGroupsResult(org.apache.kafka.clients.admin.DescribeConsumerGroupsResult) Properties(java.util.Properties) Collection(java.util.Collection) RemoveMembersFromConsumerGroupOptions(org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) IOException(java.io.IOException) KafkaFuture(org.apache.kafka.common.KafkaFuture) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) JavaConverters(scala.collection.JavaConverters) Optional(java.util.Optional) MemberDescription(org.apache.kafka.clients.admin.MemberDescription) OptionSpecBuilder(joptsimple.OptionSpecBuilder) Collections(java.util.Collections) 
KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ArrayList(java.util.ArrayList) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) IOException(java.io.IOException) Properties(java.util.Properties) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) ParseException(java.text.ParseException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) HashSet(java.util.HashSet)

Example 10 with ConsumerConfig

This example shows a use of org.apache.kafka.clients.consumer.ConsumerConfig in the Pinpoint project by Naver.

Source: class ConsumerConstructorInterceptor, method after.

/**
 * Post-construction hook: reads the ConsumerConfig passed to the consumer's
 * constructor and stores the remote address derived from it on the target
 * instance via its injected field accessor.
 */
@Override
public void after(Object target, Object[] args, Object result, Throwable throwable) {
    if (isDebug) {
        logger.afterInterceptor(target, args, result, throwable);
    }
    // Nothing to record when construction failed or the target was not instrumented.
    if (throwable != null || !(target instanceof RemoteAddressFieldAccessor)) {
        return;
    }
    final ConsumerConfig config = getConsumerConfig(args);
    if (config != null) {
        ((RemoteAddressFieldAccessor) target)._$PINPOINT$_setRemoteAddress(getRemoteAddress(config));
    }
}
Also used : RemoteAddressFieldAccessor(com.navercorp.pinpoint.plugin.kafka.field.accessor.RemoteAddressFieldAccessor) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig)

Aggregations

ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig)35 List (java.util.List)31 Map (java.util.Map)24 Set (java.util.Set)24 Collectors (java.util.stream.Collectors)24 Pattern (java.util.regex.Pattern)23 Optional (java.util.Optional)22 TimeUnit (java.util.concurrent.TimeUnit)20 UUID (java.util.UUID)19 Handler (io.vertx.core.Handler)18 Vertx (io.vertx.core.Vertx)18 Buffer (io.vertx.core.buffer.Buffer)18 KafkaConsumerRecord (io.vertx.kafka.client.consumer.KafkaConsumerRecord)18 Logger (org.slf4j.Logger)18 LoggerFactory (org.slf4j.LoggerFactory)18 Instant (java.time.Instant)17 HashMap (java.util.HashMap)17 Truth.assertThat (com.google.common.truth.Truth.assertThat)16 Future (io.vertx.core.Future)16 Promise (io.vertx.core.Promise)16