Use of org.apache.kafka.streams.TopologyTestDriver in project kafka by apache.
From the class ProcessorTopologyTest, method testPrefixScanInMemoryStoreWithCachingWithLogging.
@Test
public void testPrefixScanInMemoryStoreWithCachingWithLogging() {
    // In-memory store with both caching and changelogging enabled.
    final StoreBuilder<KeyValueStore<String, String>> storeBuilder =
        Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String())
            .withCachingEnabled()
            .withLoggingEnabled(Collections.emptyMap());
    topology
        .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1)
        .addProcessor("processor1", defineWithStores(() -> new StatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1")
        .addSink("counts", OUTPUT_TOPIC_1, "processor1");
    driver = new TopologyTestDriver(topology, props);
    final TestInputTopic<String, String> inputTopic =
        driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER);
    final TestOutputTopic<Integer, String> outputTopic1 =
        driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer());
    inputTopic.pipeInput("key1", "value1");
    inputTopic.pipeInput("key2", "value2");
    inputTopic.pipeInput("key3", "value3");
    inputTopic.pipeInput("key1", "value4");
    // No records have reached the output topic at this point.
    assertTrue(outputTopic1.isEmpty());
    final KeyValueStore<String, String> store = driver.getKeyValueStore(DEFAULT_STORE_NAME);
    final List<KeyValue<String, String>> results = prefixScanResults(store, DEFAULT_PREFIX);
    // key1 was written twice, so the prefix scan returns its latest value.
    assertEquals("key1", results.get(0).key);
    assertEquals("value4", results.get(0).value);
    assertEquals("key2", results.get(1).key);
    assertEquals("value2", results.get(1).value);
    assertEquals("key3", results.get(2).key);
    assertEquals("value3", results.get(2).value);
}
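The prefixScanResults helper is referenced above but not shown on this page. A minimal sketch of what it plausibly does, assuming it simply drains the store's prefixScan iterator (the KIP-614 API on ReadOnlyKeyValueStore) into a list; the use of a plain StringSerializer for the prefix is inferred from the String keys above, not taken from shown source:

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.KeyValueStore;

private static List<KeyValue<String, String>> prefixScanResults(final KeyValueStore<String, String> store,
                                                                final String prefix) {
    final List<KeyValue<String, String>> results = new ArrayList<>();
    // prefixScan needs a serializer for the prefix type to compute the matching key range.
    try (final KeyValueIterator<String, String> iterator = store.prefixScan(prefix, new StringSerializer())) {
        while (iterator.hasNext()) {
            results.add(iterator.next());
        }
    }
    return results;
}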
Use of org.apache.kafka.streams.TopologyTestDriver in project kafka by apache.
From the class ProcessorTopologyTest, method testPrefixScanLruMapWithCachingNoLoggingOldProcessor.
// testing old PAPI
@Deprecated
@Test
public void testPrefixScanLruMapWithCachingNoLoggingOldProcessor() {
    // LRU-map-backed store (at most 100 entries) with caching enabled and changelogging disabled.
    final StoreBuilder<KeyValueStore<String, String>> storeBuilder =
        Stores.keyValueStoreBuilder(Stores.lruMap(DEFAULT_STORE_NAME, 100), Serdes.String(), Serdes.String())
            .withCachingEnabled()
            .withLoggingDisabled();
    topology
        .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1)
        .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1")
        .addSink("counts", OUTPUT_TOPIC_1, "processor1");
    driver = new TopologyTestDriver(topology, props);
    final TestInputTopic<String, String> inputTopic =
        driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER);
    final TestOutputTopic<Integer, String> outputTopic1 =
        driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer());
    inputTopic.pipeInput("key1", "value1");
    inputTopic.pipeInput("key2", "value2");
    inputTopic.pipeInput("key3", "value3");
    inputTopic.pipeInput("key1", "value4");
    // No records have reached the output topic at this point.
    assertTrue(outputTopic1.isEmpty());
    final KeyValueStore<String, String> store = driver.getKeyValueStore(DEFAULT_STORE_NAME);
    final List<KeyValue<String, String>> results = prefixScanResults(store, DEFAULT_PREFIX);
    // key1 was written twice, so the prefix scan returns its latest value.
    assertEquals("key1", results.get(0).key);
    assertEquals("value4", results.get(0).value);
    assertEquals("key2", results.get(1).key);
    assertEquals("value2", results.get(1).value);
    assertEquals("key3", results.get(2).key);
    assertEquals("value3", results.get(2).value);
}
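Both prefix-scan tests attach their store through helper suppliers (defineWithStores, defineWithStoresOldAPI) that this page does not reproduce. A plausible sketch of the new-PAPI variant, assuming it overrides ProcessorSupplier#stores() — the ConnectedStoreProvider hook through which a supplier declares state stores for the topology to add and connect automatically; the generic signature is illustrative:

import java.util.Set;
import java.util.function.Supplier;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.state.StoreBuilder;

private static <KIn, VIn, KOut, VOut> ProcessorSupplier<KIn, VIn, KOut, VOut> defineWithStores(
        final Supplier<Processor<KIn, VIn, KOut, VOut>> processor,
        final Set<StoreBuilder<?>> stores) {
    return new ProcessorSupplier<KIn, VIn, KOut, VOut>() {
        @Override
        public Processor<KIn, VIn, KOut, VOut> get() {
            return processor.get();
        }

        @Override
        public Set<StoreBuilder<?>> stores() {
            // Store builders returned here are added to the topology and connected
            // to this supplier's processor, so no separate addStateStore call is needed.
            return stores;
        }
    };
}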
Use of org.apache.kafka.streams.TopologyTestDriver in project kafka by apache.
From the class SessionStoreFetchTest, method testStoreConfig.
@Test
public void testStoreConfig() {
    final Materialized<String, Long, SessionStore<Bytes, byte[]>> stateStoreConfig =
        getStoreConfig(storeType, STORE_NAME, enableLogging, enableCaching);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
    stream.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(WINDOW_SIZE)))
        .count(stateStoreConfig)
        .toStream()
        .to("output");
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get the input topic and the state store
        final TestInputTopic<String, String> input = driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final SessionStore<String, Long> stateStore = driver.getSessionStore(STORE_NAME);
        // write some data: records with index below medium start at time 0, the rest at 1500;
        // each key gets two records one session-gap apart
        final int medium = DATA_SIZE / 2 * 2;
        for (int i = 0; i < records.size(); i++) {
            final KeyValue<String, String> kv = records.get(i);
            final long windowStartTime = i < medium ? 0 : 1500;
            input.pipeInput(kv.key, kv.value, windowStartTime);
            input.pipeInput(kv.key, kv.value, windowStartTime + WINDOW_SIZE);
        }
        verifyNormalQuery(stateStore);
        verifyInfiniteQuery(stateStore);
        verifyRangeQuery(stateStore);
    }
}
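getStoreConfig, which produces the Materialized the windowed count is materialized into, is not shown on this page. A hedged sketch under the assumption that StoreType is a test-local enum selecting between the in-memory and persistent (RocksDB) session store suppliers; the one-day retention is an arbitrary placeholder, not a value from the source:

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.SessionBytesStoreSupplier;
import org.apache.kafka.streams.state.SessionStore;
import org.apache.kafka.streams.state.Stores;

// Hypothetical enum mirroring the two built-in session store implementations.
enum StoreType { InMemory, RocksDB }

private Materialized<String, Long, SessionStore<Bytes, byte[]>> getStoreConfig(final StoreType type,
                                                                               final String name,
                                                                               final boolean loggingEnabled,
                                                                               final boolean cachingEnabled) {
    final SessionBytesStoreSupplier supplier = type == StoreType.InMemory
        ? Stores.inMemorySessionStore(name, Duration.ofDays(1))       // retention chosen arbitrarily for the sketch
        : Stores.persistentSessionStore(name, Duration.ofDays(1));
    final Materialized<String, Long, SessionStore<Bytes, byte[]>> materialized =
        Materialized.<String, Long>as(supplier)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Long());
    if (cachingEnabled) {
        materialized.withCachingEnabled();
    } else {
        materialized.withCachingDisabled();
    }
    if (loggingEnabled) {
        materialized.withLoggingEnabled(Collections.emptyMap());
    } else {
        materialized.withLoggingDisabled();
    }
    return materialized;
}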
Use of org.apache.kafka.streams.TopologyTestDriver in project kafka by apache.
From the class WindowStoreFetchTest, method testStoreConfig.
@Test
public void testStoreConfig() {
    final Materialized<String, Long, WindowStore<Bytes, byte[]>> stateStoreConfig =
        getStoreConfig(storeType, STORE_NAME, enableLogging, enableCaching);
    // Create topology: windowed count over the input stream
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
    stream.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(WINDOW_SIZE)))
        .count(stateStoreConfig)
        .toStream()
        .to("output");
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get the input topic and the state store
        final TestInputTopic<String, String> input = driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final WindowStore<String, Long> stateStore = driver.getWindowStore(STORE_NAME);
        // write some data: records with index below medium get timestamps near 0, the rest near WINDOW_SIZE
        final int medium = DATA_SIZE / 2 * 2;
        for (int i = 0; i < records.size(); i++) {
            final KeyValue<String, String> kv = records.get(i);
            final long windowStartTime = i < medium ? 0 : WINDOW_SIZE;
            input.pipeInput(kv.key, kv.value, windowStartTime + i);
        }
        // query the state store: full scans, forward and backward
        try (final KeyValueIterator<Windowed<String>, Long> scanIterator =
                 forward ? stateStore.fetchAll(0, Long.MAX_VALUE) : stateStore.backwardFetchAll(0, Long.MAX_VALUE)) {
            final Iterator<KeyValue<Windowed<String>, Long>> dataIterator =
                forward ? expectedRecords.iterator() : expectedRecords.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        // an unbounded key-range fetch (null bounds) must return the same records as fetchAll
        try (final KeyValueIterator<Windowed<String>, Long> scanIterator =
                 forward ? stateStore.fetch(null, null, 0, Long.MAX_VALUE) : stateStore.backwardFetch(null, null, 0, Long.MAX_VALUE)) {
            final Iterator<KeyValue<Windowed<String>, Long>> dataIterator =
                forward ? expectedRecords.iterator() : expectedRecords.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        // bounded key ranges, including half-open ones
        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
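The testRange helper is not reproduced on this page either. A sketch of one plausible implementation, assuming it scans the given key range over the whole time span and checks that every returned key respects the bounds, where a null bound means the range is open on that side; assertTrue is statically imported as in the tests above:

import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.WindowStore;

private void testRange(final String name, final WindowStore<String, Long> store,
                       final String low, final String high, final boolean forward) {
    // name labels the scenario; the real implementation presumably uses it in assertion messages.
    try (final KeyValueIterator<Windowed<String>, Long> iterator =
             forward ? store.fetch(low, high, 0, Long.MAX_VALUE)
                     : store.backwardFetch(low, high, 0, Long.MAX_VALUE)) {
        while (iterator.hasNext()) {
            final Windowed<String> windowedKey = iterator.next().key;
            // a null bound leaves that side of the range unbounded (the "until"/"from" cases above)
            assertTrue(low == null || windowedKey.key().compareTo(low) >= 0);
            assertTrue(high == null || windowedKey.key().compareTo(high) <= 0);
        }
    }
}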
Use of org.apache.kafka.streams.TopologyTestDriver in project kafka by apache.
From the class KTableKTableForeignKeyJoinScenarioTest, method validateTopologyCanProcessData.
private void validateTopologyCanProcessData(final StreamsBuilder builder) {
    final Properties config = new Properties();
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    // safeTestName presumably feeds the application id (otherwise it would be unused)
    config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName);
    config.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.IntegerSerde.class.getName());
    config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName());
    config.setProperty(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    try (final TopologyTestDriver topologyTestDriver = new TopologyTestDriver(builder.build(), config)) {
        final TestInputTopic<Integer, String> aTopic =
            topologyTestDriver.createInputTopic("A", new IntegerSerializer(), new StringSerializer());
        final TestInputTopic<Integer, String> bTopic =
            topologyTestDriver.createInputTopic("B", new IntegerSerializer(), new StringSerializer());
        final TestOutputTopic<Integer, String> output =
            topologyTestDriver.createOutputTopic("output", new IntegerDeserializer(), new StringDeserializer());
        aTopic.pipeInput(1, "999-alpha");
        bTopic.pipeInput(999, "beta");
        final Map<Integer, String> x = output.readKeyValuesToMap();
        assertThat(x, is(Collections.singletonMap(1, "(999-alpha,(999-alpha,beta))")));
    }
}
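The StreamsBuilder handed to this method is constructed by the individual test methods, which are not shown here. A minimal caller sketch that would satisfy the assertion above, assuming a single KTable-KTable foreign-key join (KIP-213) whose joiner nests the left value into the result; the topic names A, B, and output match the driver setup, and the joiner format is reverse-engineered from the expected output:

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;

final StreamsBuilder builder = new StreamsBuilder();
final KTable<Integer, String> left = builder.table("A");    // default serdes: Integer/String (see config above)
final KTable<Integer, String> right = builder.table("B");
left.join(
        right,
        // foreign key = the numeric prefix of the left value: "999-alpha" -> 999
        value -> Integer.parseInt(value.split("-")[0]),
        (leftValue, rightValue) -> "(" + leftValue + ",(" + leftValue + "," + rightValue + "))")
    .toStream()
    .to("output");
validateTopologyCanProcessData(builder);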