Use of org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper in project kafka by apache.
From the class ErrorHandlingIntegrationTest, method shouldBackOffTaskAndEmitDataWithinSameTopology.
@Test
public void shouldBackOffTaskAndEmitDataWithinSameTopology() throws Exception {
    final AtomicInteger noOutputExpected = new AtomicInteger(0);
    final AtomicInteger outputExpected = new AtomicInteger(0);
    try (final KafkaStreamsNamedTopologyWrapper kafkaStreams = new KafkaStreamsNamedTopologyWrapper(properties)) {
        // Replace any stream thread killed by the exception below, so the healthy sub-topology keeps running
        kafkaStreams.setUncaughtExceptionHandler(exception -> StreamThreadExceptionResponse.REPLACE_THREAD);
        final NamedTopologyBuilder builder = kafkaStreams.newNamedTopologyBuilder("topology_A");
        // Healthy sub-topology: forwards records and counts them
        builder.stream(inputTopic).peek((k, v) -> outputExpected.incrementAndGet()).to(outputTopic);
        // Failing sub-topology: throws on every record, so the second peek() must never fire
        builder.stream(errorInputTopic).peek((k, v) -> {
            throw new RuntimeException("Kaboom");
        }).peek((k, v) -> noOutputExpected.incrementAndGet()).to(errorOutputTopic);
        kafkaStreams.addNamedTopology(builder.build());
        StreamsTestUtils.startKafkaStreamsAndWaitForRunningState(kafkaStreams);

        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
            errorInputTopic,
            Arrays.asList(new KeyValue<>(1, "A")),
            TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, StringSerializer.class, new Properties()),
            0L);
        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
            inputTopic,
            Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(1, "B")),
            TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, StringSerializer.class, new Properties()),
            0L);

        IntegrationTestUtils.waitUntilFinalKeyValueRecordsReceived(
            TestUtils.consumerConfig(CLUSTER.bootstrapServers(), IntegerDeserializer.class, StringDeserializer.class),
            outputTopic,
            Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(1, "B")));

        assertThat(noOutputExpected.get(), equalTo(0));
        assertThat(outputExpected.get(), equalTo(2));
    }
}
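The uncaught exception handler is what makes this test work: returning StreamThreadExceptionResponse.REPLACE_THREAD tells Kafka Streams to spin up a replacement for any stream thread killed by the "Kaboom" exception, so the healthy sub-topology within the same named topology keeps processing and both assertions can pass. A minimal standalone sketch of the same pattern (the logging is our addition; only setUncaughtExceptionHandler and the REPLACE_THREAD response come from the test):

import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

kafkaStreams.setUncaughtExceptionHandler(exception -> {
    // Log the failure, then ask the runtime to replace the dead thread;
    // tasks of unaffected sub-topologies continue uninterrupted
    System.err.println("Stream thread died, replacing it: " + exception);
    return StreamThreadExceptionResponse.REPLACE_THREAD;
});

The other possible responses, SHUTDOWN_CLIENT and SHUTDOWN_APPLICATION, would stop the healthy sub-topology as well.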
Use of org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper in project kafka by apache.
From the class NamedTopologyIntegrationTest, method setup.
@Before
public void setup() throws Exception {
    appId = safeUniqueTestName(NamedTopologyIntegrationTest.class, testName);
    changelog1 = TOPIC_PREFIX + "-" + TOPOLOGY_1 + "-store-changelog";
    changelog2 = TOPIC_PREFIX + "-" + TOPOLOGY_2 + "-store-changelog";
    changelog3 = TOPIC_PREFIX + "-" + TOPOLOGY_3 + "-store-changelog";
    props = configProps(appId, "host1");
    streams = new KafkaStreamsNamedTopologyWrapper(props, clientSupplier);
    topology1Builder = streams.newNamedTopologyBuilder(TOPOLOGY_1);
    topology1BuilderDup = streams.newNamedTopologyBuilder(TOPOLOGY_1);
    topology2Builder = streams.newNamedTopologyBuilder(TOPOLOGY_2);
    topology3Builder = streams.newNamedTopologyBuilder(TOPOLOGY_3);
    // TODO KAFKA-12648: refactor to avoid deleting & (re)creating output topics for each test
    CLUSTER.createTopic(OUTPUT_STREAM_1, 2, 1);
    CLUSTER.createTopic(OUTPUT_STREAM_2, 2, 1);
    CLUSTER.createTopic(OUTPUT_STREAM_3, 2, 1);
}
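The configProps(appId, host) helper is referenced but not shown in this snippet. A plausible minimal version, assuming the usual embedded-cluster test setup (the exact contents in the Kafka source may differ, and the port number here is made up):

private static Properties configProps(final String appId, final String host) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    // "host1" vs "host2" distinguishes the two instances for interactive queries
    props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host + ":2020");
    props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    return props;
}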
Use of org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper in project kafka by apache.
From the class StoreQueryIntegrationTest, method createNamedTopologyKafkaStreams.
private KafkaStreamsNamedTopologyWrapper createNamedTopologyKafkaStreams(final Properties config) {
    final KafkaStreamsNamedTopologyWrapper streams = new KafkaStreamsNamedTopologyWrapper(config);
    // Register every instance so the test harness can close it during teardown
    streamsToCleanup.add(streams);
    return streams;
}
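streamsToCleanup exists so that every wrapper created by a test gets closed afterwards, even when the test fails before reaching a close() call of its own. A sketch of the matching teardown hook (the field type and the @After method are assumptions inferred from the name; they are not shown in the source):

private final List<KafkaStreams> streamsToCleanup = new ArrayList<>();

@After
public void tearDown() {
    for (final KafkaStreams streams : streamsToCleanup) {
        streams.close(Duration.ofSeconds(30));
    }
}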
Use of org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper in project kafka by apache.
From the class NamedTopologyIntegrationTest, method setupSecondKafkaStreams.
private void setupSecondKafkaStreams() {
    // Same application id as the first instance, so both join the same Streams application
    props2 = configProps(appId, "host2");
    streams2 = new KafkaStreamsNamedTopologyWrapper(props2, clientSupplier);
    topology1Builder2 = streams2.newNamedTopologyBuilder(TOPOLOGY_1);
    topology2Builder2 = streams2.newNamedTopologyBuilder(TOPOLOGY_2);
}
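Because props2 reuses appId, the second wrapper joins the same application as the first, and the two instances split the partitions of each named topology between them. A sketch of how both instances might be brought up with both topologies, assembled from the patterns in the other snippets on this page (the actual tests wire this up per test case):

streams.addNamedTopology(topology1Builder.build());
streams.addNamedTopology(topology2Builder.build());
streams2.addNamedTopology(topology1Builder2.build());
streams2.addNamedTopology(topology2Builder2.build());
StreamsTestUtils.startKafkaStreamsAndWaitForRunningState(streams);
StreamsTestUtils.startKafkaStreamsAndWaitForRunningState(streams2);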
Use of org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper in project kafka by apache.
From the class StoreQueryIntegrationTest, method shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology.
@Test
public void shouldQuerySpecificStalePartitionStoresMultiStreamThreadsNamedTopology() throws Exception {
    final int batch1NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final int numStreamThreads = 2;

    final Properties streamsConfiguration1 = streamsConfiguration();
    streamsConfiguration1.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);
    final Properties streamsConfiguration2 = streamsConfiguration();
    streamsConfiguration2.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numStreamThreads);

    final String topologyA = "topology-A";
    final KafkaStreamsNamedTopologyWrapper kafkaStreams1 = createNamedTopologyKafkaStreams(streamsConfiguration1);
    final KafkaStreamsNamedTopologyWrapper kafkaStreams2 = createNamedTopologyKafkaStreams(streamsConfiguration2);
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);

    final NamedTopologyBuilder builder1A = kafkaStreams1.newNamedTopologyBuilder(topologyA, streamsConfiguration1);
    getStreamsBuilderWithTopology(builder1A, semaphore);
    final NamedTopologyBuilder builder2A = kafkaStreams2.newNamedTopologyBuilder(topologyA, streamsConfiguration2);
    getStreamsBuilderWithTopology(builder2A, semaphore);

    kafkaStreams1.start(builder1A.build());
    kafkaStreams2.start(builder2A.build());
    waitForApplicationState(kafkaStreamsList, State.RUNNING, Duration.ofSeconds(60));

    assertTrue(kafkaStreams1.metadataForLocalThreads().size() > 1);
    assertTrue(kafkaStreams2.metadataForLocalThreads().size() > 1);

    produceValueRange(key, 0, batch1NumMessages);

    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));

    final KeyQueryMetadata keyQueryMetadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer(), topologyA);
    // the partition that the key belongs to
    final int keyPartition = keyQueryMetadata.partition();
    // a partition that the key does not belong to
    final int keyDontBelongPartition = (keyPartition == 0) ? 1 : 0;
    final QueryableStoreType<ReadOnlyKeyValueStore<Integer, Integer>> queryableStoreType = keyValueStore();

    // Assert that both active and standby are able to query for the key on its own partition
    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> param =
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
            .enableStaleStores()
            .withPartition(keyPartition);
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store1 = getStore(kafkaStreams1, param);
        return store1.get(key) != null;
    }, "store1 cannot find results for key");
    TestUtils.waitForCondition(() -> {
        final ReadOnlyKeyValueStore<Integer, Integer> store2 = getStore(kafkaStreams2, param);
        return store2.get(key) != null;
    }, "store2 cannot find results for key");

    final NamedTopologyStoreQueryParameters<ReadOnlyKeyValueStore<Integer, Integer>> otherParam =
        NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(topologyA, TABLE_NAME, queryableStoreType)
            .enableStaleStores()
            .withPartition(keyDontBelongPartition);
    final ReadOnlyKeyValueStore<Integer, Integer> store3 = getStore(kafkaStreams1, otherParam);
    final ReadOnlyKeyValueStore<Integer, Integer> store4 = getStore(kafkaStreams2, otherParam);

    // Assert that the key is not found when querying the partition it does not belong to
    assertThat(store3.get(key), is(nullValue()));
    assertThat(store4.get(key), is(nullValue()));
}
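This test exercises the two halves of stale querying: enableStaleStores() permits reads from standby and restoring stores, while withPartition(...) pins the lookup to a single partition, which is why the store3/store4 reads return null for a key hosted on the other partition. For routing across instances rather than partitions, the same KeyQueryMetadata also carries host information; a short sketch (the HostInfo handling is our addition, while the queryMetadataForKey call mirrors the one in the test):

import java.util.Set;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.state.HostInfo;

final KeyQueryMetadata metadata = kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, new IntegerSerializer(), topologyA);
final HostInfo active = metadata.activeHost();           // instance owning the active task for the key's partition
final Set<HostInfo> standbys = metadata.standbyHosts();  // instances that can serve stale reads when enabled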