
Example 1 with NamedTopology

Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology in the Apache Kafka project.

From the class NamedTopologyIntegrationTest, the method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning:

@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up a named topology with two stateful subtopologies; the key-changing map() forces a repartition before each aggregation
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    streams.removeNamedTopology(TOPOLOGY_1, true).all().get();
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog") || t.contains("-repartition")).forEach(t -> {
        try {
            CLUSTER.deleteTopicsAndWait(t);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    });
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
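The fixtures used above (CLUSTER, streams, topology1Builder, topology1BuilderDup, consumerConfig and the topic/data constants) are initialized elsewhere in NamedTopologyIntegrationTest. Below is a minimal, hypothetical sketch of how such fixtures can be wired; the property values are placeholders, and the (Properties, KafkaClientSupplier) wrapper constructor and the two-argument newNamedTopologyBuilder signature are assumptions inferred from the imports listed under "Also used", so they may differ between Kafka versions.

// Hypothetical fixture wiring, roughly mirroring what the test class does in its setup method.
// Imports are the ones listed under "Also used" below.
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "named-topology-app");          // placeholder application id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); // embedded cluster address

// The wrapper exposes the multi-topology API used above:
// start(), addNamedTopology(), removeNamedTopology(), cleanUpNamedTopology().
final KafkaStreamsNamedTopologyWrapper streams =
    new KafkaStreamsNamedTopologyWrapper(props, new DefaultKafkaClientSupplier());

// Each NamedTopologyBuilder builds one NamedTopology under a fixed name (TOPOLOGY_1 in the test).
// A second builder with the same name lets the test re-add an equivalent topology after removal.
final NamedTopologyBuilder topology1Builder    = streams.newNamedTopologyBuilder(TOPOLOGY_1, props);
final NamedTopologyBuilder topology1BuilderDup = streams.newNamedTopologyBuilder(TOPOLOGY_1, props);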
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) Stores(org.apache.kafka.streams.state.Stores) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.notNullValue(org.hamcrest.CoreMatchers.notNullValue) Collections.singletonList(java.util.Collections.singletonList) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ClientUtils.extractThreadId(org.apache.kafka.streams.processor.internals.ClientUtils.extractThreadId) MissingSourceTopicException(org.apache.kafka.streams.errors.MissingSourceTopicException) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) StreamsMetadata(org.apache.kafka.streams.StreamsMetadata) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) KafkaClientSupplier(org.apache.kafka.streams.KafkaClientSupplier) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) State(org.apache.kafka.streams.KafkaStreams.State) Collectors(java.util.stream.Collectors) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) List(java.util.List) Materialized(org.apache.kafka.streams.kstream.Materialized) Optional(java.util.Optional) AddNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult) Queue(java.util.Queue) Pattern(java.util.regex.Pattern) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) CoreMatchers.not(org.hamcrest.CoreMatchers.not) NamedTopologyStoreQueryParameters(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyStoreQueryParameters) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) TestUtils.retryOnExceptionWithTimeout(org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout) KeyValue.pair(org.apache.kafka.streams.KeyValue.pair) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) CoreMatchers.nullValue(org.hamcrest.CoreMatchers.nullValue) 
Before(org.junit.Before) IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived) KTable(org.apache.kafka.streams.kstream.KTable) IntegrationTestUtils.waitForApplicationState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForApplicationState) Properties(java.util.Properties) StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Consumed(org.apache.kafka.streams.kstream.Consumed) StreamsMetadataImpl(org.apache.kafka.streams.state.internals.StreamsMetadataImpl) Test(org.junit.Test) RemoveNamedTopologyResult(org.apache.kafka.streams.processor.internals.namedtopology.RemoveNamedTopologyResult) NOT_AVAILABLE(org.apache.kafka.streams.KeyQueryMetadata.NOT_AVAILABLE) Rule(org.junit.Rule) KeyQueryMetadata(org.apache.kafka.streams.KeyQueryMetadata) LagInfo(org.apache.kafka.streams.LagInfo) UniqueTopicSerdeScope(org.apache.kafka.streams.utils.UniqueTopicSerdeScope) KeyValue(org.apache.kafka.streams.KeyValue) NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) Test(org.junit.Test)

Example 2 with NamedTopology

Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology in the Apache Kafka project.

From the class NamedTopologyTest, the method shouldReturnTopologyByName:

@Test
public void shouldReturnTopologyByName() {
    final NamedTopology topology1 = builder1.build();
    final NamedTopology topology2 = builder2.build();
    final NamedTopology topology3 = builder3.build();
    streams.start(asList(topology1, topology2, topology3));
    assertThat(streams.getTopologyByName("topology-1").get(), equalTo(topology1));
    assertThat(streams.getTopologyByName("topology-2").get(), equalTo(topology2));
    assertThat(streams.getTopologyByName("topology-3").get(), equalTo(topology3));
}
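Here builder1 through builder3 are NamedTopologyBuilder instances obtained from the same streams wrapper, each carrying the topology name that is later looked up. The sketch below is hypothetical: topic names are placeholders, props and streams are assumed to be wired as in Example 1, and the final assertion assumes getTopologyByName returns an Optional<NamedTopology> (which the .get() calls above imply).

// Hypothetical builder setup; each builder needs at least one source so build() produces a non-empty NamedTopology.
final NamedTopologyBuilder builder1 = streams.newNamedTopologyBuilder("topology-1", props);
final NamedTopologyBuilder builder2 = streams.newNamedTopologyBuilder("topology-2", props);
final NamedTopologyBuilder builder3 = streams.newNamedTopologyBuilder("topology-3", props);
builder1.stream("input-1");
builder2.stream("input-2");
builder3.stream("input-3");

// A name that was never added should come back empty rather than throw
// (uses org.hamcrest.CoreMatchers.is, not shown in this test's import list).
assertThat(streams.getTopologyByName("topology-4").isPresent(), is(false));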
Also used : NamedTopology(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology) Test(org.junit.Test)

Example 3 with NamedTopology

Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology in the Apache Kafka project.

From the class NamedTopologyIntegrationTest, the method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology:

@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up a named topology with two stateful subtopologies
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    streams.removeNamedTopology("topology-1", true).all().get();
    streams.cleanUpNamedTopology("topology-1");
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    });
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
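Both integration examples verify output through waitUntilMinKeyValueRecordsReceived, which polls the output topics with the shared consumerConfig. Below is a hedged sketch of how such configs are commonly built for these String-keyed, Long-valued streams using the org.apache.kafka.test.TestUtils helpers from the import list; the real test's setup may differ.

// Hypothetical config construction; CLUSTER is the EmbeddedKafkaCluster used throughout the test.
final Properties consumerConfig = TestUtils.consumerConfig(
    CLUSTER.bootstrapServers(),
    StringDeserializer.class,   // record keys are Strings
    LongDeserializer.class);    // counts and sums are Longs
final Properties producerConfig = TestUtils.producerConfig(
    CLUSTER.bootstrapServers(),
    StringSerializer.class,
    LongSerializer.class);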
Also used : the same imports as in Example 1.

Aggregations

Duration (java.time.Duration) 2
Arrays.asList (java.util.Arrays.asList) 2
Collection (java.util.Collection) 2
Collections.singleton (java.util.Collections.singleton) 2
Collections.singletonList (java.util.Collections.singletonList) 2
HashMap (java.util.HashMap) 2
Iterator (java.util.Iterator) 2
LinkedList (java.util.LinkedList) 2
List (java.util.List) 2
Map (java.util.Map) 2
Optional (java.util.Optional) 2
Properties (java.util.Properties) 2
Queue (java.util.Queue) 2
Set (java.util.Set) 2
Pattern (java.util.regex.Pattern) 2
Collectors (java.util.stream.Collectors) 2
ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig) 2
TopicPartition (org.apache.kafka.common.TopicPartition) 2
LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer) 2
LongSerializer (org.apache.kafka.common.serialization.LongSerializer) 2