Example 1 with Topology

Use of org.apache.kafka.streams.Topology in project ksql by confluentinc.

From class JoinNodeTest, method shouldHaveLeftJoin:

@Test
public void shouldHaveLeftJoin() {
    setupTopicClientExpectations(1, 1);
    buildJoin();
    final Topology topology = builder.build();
    final TopologyDescription.Processor leftJoin = (TopologyDescription.Processor) getNodeByName(topology, "KSTREAM-LEFTJOIN-0000000014");
    final List<String> predecessors = leftJoin.predecessors().stream().map(TopologyDescription.Node::name).collect(Collectors.toList());
    assertThat(leftJoin.stores(), equalTo(Utils.mkSet("KSTREAM-REDUCE-STATE-STORE-0000000003")));
    assertThat(predecessors, equalTo(Collections.singletonList("KSTREAM-SOURCE-0000000013")));
}
Also used: Topology(org.apache.kafka.streams.Topology), TopologyDescription(org.apache.kafka.streams.TopologyDescription), Test(org.junit.Test)
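
These assertions rely on the public Topology#describe() API, which returns a TopologyDescription whose subtopologies, nodes, and attached state stores can be walked. Below is a minimal, self-contained sketch of that traversal; the topic names "words" and "word-counts" are illustrative, not taken from the test above.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.TopologyDescription;
import org.apache.kafka.streams.kstream.Consumed;

public class DescribeTopologyExample {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("words", Consumed.with(Serdes.String(), Serdes.String()))
               .groupByKey()
               .count() // materialized in an internal state store
               .toStream()
               .to("word-counts");
        final TopologyDescription description = builder.build().describe();
        // Walk every node and print its name plus any attached state stores,
        // the same structure the assertions above inspect.
        for (final TopologyDescription.Subtopology subtopology : description.subtopologies()) {
            for (final TopologyDescription.Node node : subtopology.nodes()) {
                System.out.println(node.name());
                if (node instanceof TopologyDescription.Processor) {
                    System.out.println("  stores: " + ((TopologyDescription.Processor) node).stores());
                }
            }
        }
    }
}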

Example 2 with Topology

Use of org.apache.kafka.streams.Topology in project ksql by confluentinc.

From class PlanTestUtil, method getNodeByName:

static TopologyDescription.Node getNodeByName(final Topology topology, final String nodeName) {
    final TopologyDescription description = topology.describe();
    final Set<TopologyDescription.Subtopology> subtopologies = description.subtopologies();
    final List<TopologyDescription.Node> nodes = subtopologies.stream().flatMap(subtopology -> subtopology.nodes().stream()).collect(Collectors.toList());
    final Map<String, List<TopologyDescription.Node>> nodesByName = nodes.stream().collect(Collectors.groupingBy(TopologyDescription.Node::name));
    return nodesByName.get(nodeName).get(0);
}
Also used: TopologyDescription(org.apache.kafka.streams.TopologyDescription), List(java.util.List), CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo), Map(java.util.Map), Set(java.util.Set), MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat), Collectors(java.util.stream.Collectors), Topology(org.apache.kafka.streams.Topology)
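
One caveat about this helper: nodesByName.get(nodeName) returns null for an unknown name, so a typo in a test surfaces as a NullPointerException. A slightly defensive variant (an adaptation for illustration, not the ksql original) fails with a readable message instead:

static TopologyDescription.Node getNodeByName(final Topology topology, final String nodeName) {
    // Search all subtopologies for the first node with the given name,
    // failing loudly rather than with an NPE when it does not exist.
    return topology.describe().subtopologies().stream()
            .flatMap(subtopology -> subtopology.nodes().stream())
            .filter(node -> node.name().equals(nodeName))
            .findFirst()
            .orElseThrow(() -> new AssertionError("No node named '" + nodeName + "' in topology"));
}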

Example 3 with Topology

Use of org.apache.kafka.streams.Topology in project apache-kafka-on-k8s by banzaicloud.

From class RestoreIntegrationTest, method shouldProcessDataFromStoresWithLoggingDisabled:

@Test
public void shouldProcessDataFromStoresWithLoggingDisabled() throws InterruptedException, ExecutionException {
    IntegrationTestUtils.produceKeyValuesSynchronously(INPUT_STREAM_2, Arrays.asList(KeyValue.pair(1, 1), KeyValue.pair(2, 2), KeyValue.pair(3, 3)), TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, IntegerSerializer.class), CLUSTER.time);
    final KeyValueBytesStoreSupplier lruMapSupplier = Stores.lruMap(INPUT_STREAM_2, 10);
    final StoreBuilder<KeyValueStore<Integer, Integer>> storeBuilder = new KeyValueStoreBuilder<>(lruMapSupplier, Serdes.Integer(), Serdes.Integer(), CLUSTER.time).withLoggingDisabled();
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.addStateStore(storeBuilder);
    final KStream<Integer, Integer> stream = streamsBuilder.stream(INPUT_STREAM_2);
    final CountDownLatch processorLatch = new CountDownLatch(3);
    stream.process(new ProcessorSupplier<Integer, Integer>() {

        @Override
        public Processor<Integer, Integer> get() {
            return new KeyValueStoreProcessor(INPUT_STREAM_2, processorLatch);
        }
    }, INPUT_STREAM_2);
    final Topology topology = streamsBuilder.build();
    kafkaStreams = new KafkaStreams(topology, props(applicationId + "-logging-disabled"));
    final CountDownLatch latch = new CountDownLatch(1);
    kafkaStreams.setStateListener(new KafkaStreams.StateListener() {

        @Override
        public void onChange(final KafkaStreams.State newState, final KafkaStreams.State oldState) {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                latch.countDown();
            }
        }
    });
    kafkaStreams.start();
    latch.await(30, TimeUnit.SECONDS);
    assertTrue(processorLatch.await(30, TimeUnit.SECONDS));
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams), Processor(org.apache.kafka.streams.processor.Processor), KeyValueStore(org.apache.kafka.streams.state.KeyValueStore), Topology(org.apache.kafka.streams.Topology), CountDownLatch(java.util.concurrent.CountDownLatch), IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer), StreamsBuilder(org.apache.kafka.streams.StreamsBuilder), AtomicInteger(java.util.concurrent.atomic.AtomicInteger), KeyValueBytesStoreSupplier(org.apache.kafka.streams.state.KeyValueBytesStoreSupplier), IntegrationTest(org.apache.kafka.test.IntegrationTest), Test(org.junit.Test)
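
The detail this test exercises is withLoggingDisabled(): it suppresses the changelog topic that Kafka Streams would otherwise back the store with, so there is nothing to restore the store from on startup. A minimal sketch of wiring such a store through the public Stores factory follows; the store name "my-store" and capacity are illustrative.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class LoggingDisabledStoreExample {
    public static StreamsBuilder builderWithUnloggedStore() {
        // An in-memory LRU store with changelogging disabled: Streams will
        // neither create a changelog topic nor restore the store from one.
        final StoreBuilder<KeyValueStore<Integer, Integer>> storeBuilder =
                Stores.keyValueStoreBuilder(
                        Stores.lruMap("my-store", 10), // name and capacity are illustrative
                        Serdes.Integer(),
                        Serdes.Integer())
                      .withLoggingDisabled();
        final StreamsBuilder builder = new StreamsBuilder();
        builder.addStateStore(storeBuilder);
        return builder;
    }
}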

Example 4 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

From class KStreamKStreamJoinTest, method shouldCreateRepartitionTopicsWithUserProvidedName:

@Test
public void shouldCreateRepartitionTopicsWithUserProvidedName() {
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties props = new Properties();
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.NO_OPTIMIZATION);
    final KStream<String, String> stream1 = builder.stream("topic", Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> stream2 = builder.stream("topic2", Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> stream3 = builder.stream("topic3", Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> newStream = stream1.map((k, v) -> new KeyValue<>(v, k));
    final StreamJoined<String, String, String> streamJoined = StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String());
    newStream.join(stream2, (value1, value2) -> value1 + value2, JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)), streamJoined.withName("first-join")).to("out-one");
    newStream.join(stream3, (value1, value2) -> value1 + value2, JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)), streamJoined.withName("second-join")).to("out-two");
    final Topology topology = builder.build(props);
    System.out.println(topology.describe().toString());
    assertEquals(expectedTopologyWithUserNamedRepartitionTopics, topology.describe().toString());
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder), StreamsConfig(org.apache.kafka.streams.StreamsConfig), InternalTopicConfig(org.apache.kafka.streams.processor.internals.InternalTopicConfig), Arrays(java.util.Arrays), CoreMatchers.hasItem(org.hamcrest.CoreMatchers.hasItem), CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo), Assert.assertThrows(org.junit.Assert.assertThrows), Stores(org.apache.kafka.streams.state.Stores), SUBTOPOLOGY_0(org.apache.kafka.streams.processor.internals.assignment.AssignmentTestUtils.SUBTOPOLOGY_0), StreamsException(org.apache.kafka.streams.errors.StreamsException), KStream(org.apache.kafka.streams.kstream.KStream), MockApiProcessor(org.apache.kafka.test.MockApiProcessor), StreamJoined(org.apache.kafka.streams.kstream.StreamJoined), HashSet(java.util.HashSet), Duration.ofHours(java.time.Duration.ofHours), JoinWindows(org.apache.kafka.streams.kstream.JoinWindows), MockApiProcessorSupplier(org.apache.kafka.test.MockApiProcessorSupplier), TopologyWrapper(org.apache.kafka.streams.TopologyWrapper), Duration(java.time.Duration), IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer), Serdes(org.apache.kafka.common.serialization.Serdes), StringSerializer(org.apache.kafka.common.serialization.StringSerializer), MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat), TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver), MockValueJoiner(org.apache.kafka.test.MockValueJoiner), Properties(java.util.Properties), Consumed(org.apache.kafka.streams.kstream.Consumed), Collection(java.util.Collection), KeyValue(org.apache.kafka.streams.KeyValue), Set(java.util.Set), Assert.assertTrue(org.junit.Assert.assertTrue), Test(org.junit.Test), Instant(java.time.Instant), WindowBytesStoreSupplier(org.apache.kafka.streams.state.WindowBytesStoreSupplier), KeyValueTimestamp(org.apache.kafka.streams.KeyValueTimestamp), LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), TestInputTopic(org.apache.kafka.streams.TestInputTopic), InternalTopologyBuilder(org.apache.kafka.streams.processor.internals.InternalTopologyBuilder), StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils), Collections(java.util.Collections), Duration.ofMillis(java.time.Duration.ofMillis), Topology(org.apache.kafka.streams.Topology), Assert.assertEquals(org.junit.Assert.assertEquals)
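
The behavior under test is that StreamJoined#withName replaces the generated KSTREAM-...-NNNNNNNNNN names of the join's repartition topic and state stores with a user-supplied base name. Below is a standalone sketch that makes the effect visible through describe(); the topic names are illustrative.

import static java.time.Duration.ofMillis;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.StreamJoined;

public class NamedRepartitionExample {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> left =
                builder.stream("left", Consumed.with(Serdes.String(), Serdes.String()));
        final KStream<String, String> right =
                builder.stream("right", Consumed.with(Serdes.String(), Serdes.String()));
        // map() changes the key, so the left side must be repartitioned before
        // the join; withName("my-join") names that repartition topic and the
        // join's window stores instead of generated numeric names.
        left.map((k, v) -> new KeyValue<>(v, k))
            .join(right,
                  (v1, v2) -> v1 + v2,
                  JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)),
                  StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String())
                              .withName("my-join"))
            .to("out");
        // The printed description should contain "my-join" in the repartition
        // topic and store names.
        System.out.println(builder.build().describe());
    }
}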

Example 5 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

From class KStreamImplTest, method shouldSupportStreamTableJoinWithKStreamToKTable:

@Test
public void shouldSupportStreamTableJoinWithKStreamToKTable() {
    final StreamsBuilder builder = new StreamsBuilder();
    final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String());
    final String streamTopic = "streamTopic";
    final String tableTopic = "tableTopic";
    final String outputTopic = "output";
    final KStream<String, String> stream = builder.stream(streamTopic, consumed);
    final KTable<String, String> table = builder.stream(tableTopic, consumed).toTable();
    stream.join(table, MockValueJoiner.TOSTRING_JOINER).to(outputTopic);
    final Topology topology = builder.build(props);
    final String topologyDescription = topology.describe().toString();
    assertThat(topologyDescription, equalTo("Topologies:\n"
            + "   Sub-topology: 0\n"
            + "    Source: KSTREAM-SOURCE-0000000000 (topics: [streamTopic])\n"
            + "      --> KSTREAM-JOIN-0000000004\n"
            + "    Processor: KSTREAM-JOIN-0000000004 (stores: [KSTREAM-TOTABLE-STATE-STORE-0000000003])\n"
            + "      --> KSTREAM-SINK-0000000005\n"
            + "      <-- KSTREAM-SOURCE-0000000000\n"
            + "    Source: KSTREAM-SOURCE-0000000001 (topics: [tableTopic])\n"
            + "      --> KSTREAM-TOTABLE-0000000002\n"
            + "    Sink: KSTREAM-SINK-0000000005 (topic: output)\n"
            + "      <-- KSTREAM-JOIN-0000000004\n"
            + "    Processor: KSTREAM-TOTABLE-0000000002 (stores: [KSTREAM-TOTABLE-STATE-STORE-0000000003])\n"
            + "      --> none\n"
            + "      <-- KSTREAM-SOURCE-0000000001\n\n"));
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
        final TestInputTopic<String, String> left = driver.createInputTopic(streamTopic, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> right = driver.createInputTopic(tableTopic, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> output = driver.createOutputTopic(outputTopic, new StringDeserializer(), new StringDeserializer());
        right.pipeInput("lhs1", "rhsValue1");
        right.pipeInput("rhs2", "rhsValue2");
        right.pipeInput("lhs3", "rhsValue3");
        assertThat(output.readKeyValuesToMap(), is(emptyMap()));
        left.pipeInput("lhs1", "lhsValue1");
        left.pipeInput("lhs2", "lhsValue2");
        final Map<String, String> expected = mkMap(mkEntry("lhs1", "lhsValue1+rhsValue1"));
        assertThat(output.readKeyValuesToMap(), is(expected));
        left.pipeInput("lhs3", "lhsValue3");
        assertThat(output.readKeyValuesToMap(), is(mkMap(mkEntry("lhs3", "lhsValue3+rhsValue3"))));
        left.pipeInput("lhs1", "lhsValue4");
        assertThat(output.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", "lhsValue4+rhsValue1"))));
    }
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder), StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer), TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver), CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString), ProcessorTopology(org.apache.kafka.streams.processor.internals.ProcessorTopology), Topology(org.apache.kafka.streams.Topology), StringSerializer(org.apache.kafka.common.serialization.StringSerializer), Test(org.junit.Test)
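
Note that the props field used by this test is defined elsewhere in KStreamImplTest (built via StreamsTestUtils). To run the body standalone, a minimal TopologyTestDriver configuration along these lines should suffice; the application id and default serdes are illustrative choices, not values from the original test.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

public class TestDriverConfig {
    static Properties props() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kstream-impl-test");
        // Required by the config but never contacted by TopologyTestDriver.
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        return props;
    }
}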

Aggregations

Topology (org.apache.kafka.streams.Topology): 87
Test (org.junit.Test): 71
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 61
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 48
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 46
Properties (java.util.Properties): 37
Serdes (org.apache.kafka.common.serialization.Serdes): 31
KeyValue (org.apache.kafka.streams.KeyValue): 30
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 26
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 25
Duration (java.time.Duration): 22
Consumed (org.apache.kafka.streams.kstream.Consumed): 22
KStream (org.apache.kafka.streams.kstream.KStream): 22
Materialized (org.apache.kafka.streams.kstream.Materialized): 22
List (java.util.List): 21
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 21
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 20
Produced (org.apache.kafka.streams.kstream.Produced): 20
Arrays (java.util.Arrays): 19
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 19