Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder in project kafka by apache.
From the class ErrorHandlingIntegrationTest, method shouldBackOffTaskAndEmitDataWithinSameTopology.
@Test
public void shouldBackOffTaskAndEmitDataWithinSameTopology() throws Exception {
    final AtomicInteger noOutputExpected = new AtomicInteger(0);
    final AtomicInteger outputExpected = new AtomicInteger(0);
    try (final KafkaStreamsNamedTopologyWrapper kafkaStreams = new KafkaStreamsNamedTopologyWrapper(properties)) {
        kafkaStreams.setUncaughtExceptionHandler(exception -> StreamThreadExceptionResponse.REPLACE_THREAD);
        // Build one topology with a healthy branch and a branch whose first processor always throws
        final NamedTopologyBuilder builder = kafkaStreams.newNamedTopologyBuilder("topology_A");
        builder.stream(inputTopic).peek((k, v) -> outputExpected.incrementAndGet()).to(outputTopic);
        builder.stream(errorInputTopic)
            .peek((k, v) -> {
                throw new RuntimeException("Kaboom");
            })
            .peek((k, v) -> noOutputExpected.incrementAndGet())
            .to(errorOutputTopic);
        kafkaStreams.addNamedTopology(builder.build());
        StreamsTestUtils.startKafkaStreamsAndWaitForRunningState(kafkaStreams);
        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
            errorInputTopic,
            Arrays.asList(new KeyValue<>(1, "A")),
            TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, StringSerializer.class, new Properties()),
            0L);
        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
            inputTopic,
            Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(1, "B")),
            TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, StringSerializer.class, new Properties()),
            0L);
        IntegrationTestUtils.waitUntilFinalKeyValueRecordsReceived(
            TestUtils.consumerConfig(CLUSTER.bootstrapServers(), IntegerDeserializer.class, StringDeserializer.class),
            outputTopic,
            Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(1, "B")));
        // The failing branch must emit nothing, while the healthy branch processes both records
        assertThat(noOutputExpected.get(), equalTo(0));
        assertThat(outputExpected.get(), equalTo(2));
    }
}
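The test above relies on fixtures defined elsewhere in ErrorHandlingIntegrationTest: the streams properties and the four topic names. A minimal sketch of what such a setup might look like; every name and value here is an illustrative assumption, not the actual fixture:

// Hypothetical fixture sketch; topic names and config values are assumptions.
private static final String inputTopic = "input";
private static final String outputTopic = "output";
private static final String errorInputTopic = "error-input";
private static final String errorOutputTopic = "error-output";

private static Properties streamsProperties() {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "error-handling-test"); // assumed app id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    return props;
}

Because the uncaught exception handler answers REPLACE_THREAD, the stream thread that dies on "Kaboom" is replaced and the failing task backs off, so the healthy branch of the same topology keeps making progress.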
Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder in project kafka by apache.
From the class NamedTopologyIntegrationTest, method shouldRemoveAndReplaceTopologicallyIncompatibleNamedTopology.
@Test
public void shouldRemoveAndReplaceTopologicallyIncompatibleNamedTopology() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // Build up named topology with two stateful subtopologies
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    streams.start(singletonList(topology1Builder.build()));
    waitForApplicationState(singletonList(streams), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    streams.removeNamedTopology(TOPOLOGY_1).all().get();
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    // Prepare a new named topology with the same name but an incompatible topology (stateful subtopologies swap order)
    final NamedTopologyBuilder topology1Builder2 = streams.newNamedTopologyBuilder(TOPOLOGY_1);
    final KStream<String, Long> inputStream2 = topology1Builder2.stream(DELAYED_INPUT_STREAM_4);
    inputStream2.groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    inputStream2.groupByKey().count().toStream().to(COUNT_OUTPUT);
    produceToInputTopics(DELAYED_INPUT_STREAM_4, STANDARD_INPUT_DATA);
    streams.addNamedTopology(topology1Builder2.build()).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
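Because the replacement topology is not compatible with the old one, the ordering matters: remove the old topology and wait for the removal to complete, clean up its local state, and only then add the new topology under the same name. A condensed sketch of that pattern, using only the calls shown above (the topology name is a placeholder):

// Sketch: replacing a named topology with a topologically incompatible one.
streams.removeNamedTopology("topology-1").all().get();     // block until removal completes
streams.cleanUpNamedTopology("topology-1");                // wipe local state left behind
final NamedTopologyBuilder replacement = streams.newNamedTopologyBuilder("topology-1");
// ... define the new, incompatible processing logic on `replacement` ...
streams.addNamedTopology(replacement.build()).all().get(); // block until the add completes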
Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder in project kafka by apache.
From the class NamedTopologyIntegrationTest, method shouldPrefixAllInternalTopicNamesWithNamedTopology.
@Test
public void shouldPrefixAllInternalTopicNamesWithNamedTopology() throws Exception {
    final String countTopologyName = "count-topology";
    final String fkjTopologyName = "FKJ-topology";
    final NamedTopologyBuilder countBuilder = streams.newNamedTopologyBuilder(countTopologyName);
    countBuilder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count();
    final NamedTopologyBuilder fkjBuilder = streams.newNamedTopologyBuilder(fkjTopologyName);
    final UniqueTopicSerdeScope serdeScope = new UniqueTopicSerdeScope();
    final KTable<String, Long> left = fkjBuilder.table(
        INPUT_STREAM_2,
        Consumed.with(serdeScope.decorateSerde(Serdes.String(), props, true), serdeScope.decorateSerde(Serdes.Long(), props, false)));
    final KTable<String, Long> right = fkjBuilder.table(
        INPUT_STREAM_3,
        Consumed.with(serdeScope.decorateSerde(Serdes.String(), props, true), serdeScope.decorateSerde(Serdes.Long(), props, false)));
    left.join(right, Object::toString, (value1, value2) -> String.valueOf(value1 + value2), Materialized.with(null, serdeScope.decorateSerde(Serdes.String(), props, false)));
    streams.start(asList(fkjBuilder.build(), countBuilder.build()));
    waitForApplicationState(singletonList(streams), State.RUNNING, Duration.ofSeconds(60));
    final String countTopicPrefix = TOPIC_PREFIX + "-" + countTopologyName;
    final String fkjTopicPrefix = TOPIC_PREFIX + "-" + fkjTopologyName;
    final Set<String> internalTopics = CLUSTER.getAllTopicsInCluster().stream()
        .filter(t -> t.contains(TOPIC_PREFIX))
        .filter(t -> t.endsWith("-repartition") || t.endsWith("-changelog") || t.endsWith("-topic"))
        .collect(Collectors.toSet());
    assertThat(internalTopics, is(mkSet(
        countTopicPrefix + "-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition",
        countTopicPrefix + "-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog",
        fkjTopicPrefix + "-KTABLE-FK-JOIN-SUBSCRIPTION-REGISTRATION-0000000006-topic",
        fkjTopicPrefix + "-KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-0000000014-topic",
        fkjTopicPrefix + "-KTABLE-FK-JOIN-SUBSCRIPTION-STATE-STORE-0000000010-changelog",
        fkjTopicPrefix + "-" + INPUT_STREAM_2 + "-STATE-STORE-0000000000-changelog",
        fkjTopicPrefix + "-" + INPUT_STREAM_3 + "-STATE-STORE-0000000003-changelog")));
}
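The join in this test is the KTable foreign-key join: the second argument (here Object::toString) extracts the foreign key from each left-hand value, and that key is looked up in the right-hand table. A minimal standalone sketch with plain serdes against an ordinary StreamsBuilder; the topic names are illustrative assumptions:

// Hypothetical foreign-key join sketch; "left-input" and "right-input" are assumed topics.
final StreamsBuilder builder = new StreamsBuilder();
final KTable<String, Long> left =
    builder.table("left-input", Consumed.with(Serdes.String(), Serdes.Long()));
final KTable<String, Long> right =
    builder.table("right-input", Consumed.with(Serdes.String(), Serdes.Long()));
left.join(right,
    Object::toString,                                                   // foreign-key extractor: left value -> right key
    (leftValue, rightValue) -> String.valueOf(leftValue + rightValue),  // combine the matched values
    Materialized.with(Serdes.String(), Serdes.String()));

The "-topic" subscription/response topics and "-changelog" topics asserted above are exactly the internal topics this join creates, each prefixed with the application id plus the topology name.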
Use of org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder in project kafka by apache.
From the class StoreQueryIntegrationTest, method shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology.
@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;
    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final String topologyA = "topology-A";
    final KafkaStreamsNamedTopologyWrapper kafkaStreams1 = createNamedTopologyKafkaStreams(streamsConfiguration1);
    final KafkaStreamsNamedTopologyWrapper kafkaStreams2 = createNamedTopologyKafkaStreams(streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    final NamedTopologyBuilder builder1A = kafkaStreams1.newNamedTopologyBuilder(topologyA, streamsConfiguration1);
    getStreamsBuilderWithTopology(builder1A, semaphore);
    final NamedTopologyBuilder builder2A = kafkaStreams2.newNamedTopologyBuilder(topologyA, streamsConfiguration2);
    getStreamsBuilderWithTopology(builder2A, semaphore);
    kafkaStreams1.start(builder1A.build());
    kafkaStreams2.start(builder2A.build());
    waitForApplicationState(kafkaStreamsList, State.RUNNING, Duration.ofSeconds(60));
    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer(), topologyA);
    // The partition the key belongs to
    final int keyPartition = keyQueryMetadata.partition();
    // A partition the key does not belong to
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();
    // Assert that both the active and the standby are able to query for the key on its own partition
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param =
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
            .enableStaleStores()
            .withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam =
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
            .enableStaleStores()
            .withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);
    // Assert that the key cannot be found on a partition it does not belong to
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
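The helper getStreamsBuilderWithTopology is part of the test class and not shown here. Based on how the test uses TABLE_NAME and the semaphore, a plausible but purely hypothetical reconstruction:

// Hypothetical reconstruction of the helper: materialize a table under TABLE_NAME
// and release one semaphore permit per processed record. "input-topic" is an assumption.
private static void getStreamsBuilderWithTopology(final NamedTopologyBuilder builder,
                                                  final Semaphore semaphore) {
    builder.table("input-topic",
            Consumed.with(Serdes.Integer(), Serdes.Integer()),
            Materialized.as(TABLE_NAME))
        .toStream()
        .foreach((k, v) -> semaphore.release());
}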