Example usage of org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult from the Apache Kafka project:
class NamedTopologyIntegrationTest, method shouldAddNamedTopologyToRunningApplicationWithMultipleNodes.
@Test
public void shouldAddNamedTopologyToRunningApplicationWithMultipleNodes() throws Exception {
    setupSecondKafkaStreams();

    // Build identical named topologies for each of the two client instances,
    // grouped here by node: node 1 gets topology1Builder/topology2Builder,
    // node 2 gets the matching *Builder2 copies.
    topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology2Builder.stream(INPUT_STREAM_2).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology2Builder2.stream(INPUT_STREAM_2).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);

    // Start both clients with only the first topology and wait until the
    // application reaches RUNNING and produces the expected counts.
    streams.start(topology1Builder.build());
    streams2.start(topology1Builder2.build());
    waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));

    // Kick off the add on BOTH nodes before blocking on either future, so the
    // operation can complete across the whole group; only then wait for both.
    final AddNamedTopologyResult addOnFirstNode = streams.addNamedTopology(topology2Builder.build());
    final AddNamedTopologyResult addOnSecondNode = streams2.addNamedTopology(topology2Builder2.build());
    addOnFirstNode.all().get();
    addOnSecondNode.all().get();

    // The newly added topology should now be processing records as well.
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_2, 3), equalTo(COUNT_OUTPUT_DATA));
}
Example usage of org.apache.kafka.streams.processor.internals.namedtopology.AddNamedTopologyResult from the Apache Kafka project:
class NamedTopologyIntegrationTest, method shouldRemoveNamedTopologyToRunningApplicationWithMultipleNodesAndResetsOffsets.
@Test
public void shouldRemoveNamedTopologyToRunningApplicationWithMultipleNodesAndResetsOffsets() throws Exception {
    setupSecondKafkaStreams();

    // Build the same named topology on both client instances and start them.
    topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    streams.start(topology1Builder.build());
    streams2.start(topology1Builder2.build());
    waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));

    // Remove the topology from both nodes (resetting offsets) before blocking
    // on the first node's future, then verify it is gone from both.
    final RemoveNamedTopologyResult result = streams.removeNamedTopology(TOPOLOGY_1, true);
    streams2.removeNamedTopology(TOPOLOGY_1, true).all().get();
    result.all().get();
    assertThat(streams.getTopologyByName(TOPOLOGY_1), equalTo(Optional.empty()));
    assertThat(streams2.getTopologyByName(TOPOLOGY_1), equalTo(Optional.empty()));
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    streams2.cleanUpNamedTopology(TOPOLOGY_1);

    // Delete the leftover changelog topics so the re-added topology starts fresh.
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            // Restore the interrupt status instead of swallowing it, and fail
            // the test fast rather than continuing with a half-cleaned cluster.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while deleting changelog topic " + t, e);
        }
    });

    // Re-add a topology reading the same input but writing to a new output
    // topic; since offsets were reset it should reprocess all input records.
    topology2Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    topology2Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    final AddNamedTopologyResult result1 = streams.addNamedTopology(topology2Builder.build());
    streams2.addNamedTopology(topology2Builder2.build()).all().get();
    result1.all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_2, 3), equalTo(COUNT_OUTPUT_DATA));
}
Aggregations