Example usage of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology in the Apache Kafka project:
class NamedTopologyIntegrationTest, method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning.
/**
 * Verifies that a named topology with repartitioning can be added to an initially empty
 * application, removed with its offsets reset and internal topics deleted, and then added
 * back again, producing the same output both times.
 */
@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up named topology with two stateful subtopologies; the map() forces a repartition
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();

    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));

    // Remove the topology, resetting offsets, and wipe its local and internal state
    streams.removeNamedTopology(TOPOLOGY_1, true).all().get();
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog") || t.contains("-repartition")).forEach(t -> {
        try {
            CLUSTER.deleteTopicsAndWait(t);
        } catch (final InterruptedException e) {
            // Restore the interrupt status and fail fast instead of silently swallowing
            // the interrupt and letting the test proceed with stale internal topics
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while deleting internal topic " + t, e);
        }
    });

    // Re-add the identical topology; it should start from scratch and reproduce the same results
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();

    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));

    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
Example usage of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology in the Apache Kafka project:
class NamedTopologyTest, method shouldReturnTopologyByName.
/**
 * Registering several named topologies at startup should make each one
 * retrievable via {@code getTopologyByName} under its own name.
 */
@Test
public void shouldReturnTopologyByName() {
    // Build three distinct named topologies up front
    final NamedTopology first = builder1.build();
    final NamedTopology second = builder2.build();
    final NamedTopology third = builder3.build();

    streams.start(asList(first, second, third));

    // Each lookup must resolve to exactly the instance that was registered
    assertThat(streams.getTopologyByName("topology-1").get(), equalTo(first));
    assertThat(streams.getTopologyByName("topology-2").get(), equalTo(second));
    assertThat(streams.getTopologyByName("topology-3").get(), equalTo(third));
}
Example usage of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology in the Apache Kafka project:
class NamedTopologyIntegrationTest, method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology.
/**
 * Verifies that a named topology can be added to an initially empty application,
 * removed with its offsets reset and changelog topics deleted, and then added back,
 * producing the same output both times.
 */
@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopology() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up named topology with two stateful subtopologies
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();

    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));

    // Remove the topology, resetting offsets, and wipe its local and changelog state.
    // Use the TOPOLOGY_1 constant rather than a raw "topology-1" literal, consistent
    // with the other tests in this class.
    streams.removeNamedTopology(TOPOLOGY_1, true).all().get();
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            // Restore the interrupt status and fail fast instead of silently swallowing
            // the interrupt and letting the test proceed with stale changelog topics
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while deleting changelog topic " + t, e);
        }
    });

    // Re-add the identical topology; it should start from scratch and reproduce the same results
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();

    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));

    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
Aggregations