Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
From the class ProcessorTopologyTest, method testPrefixScanInMemoryStoreWithCachingWithLoggingOldProcessor:
// testing old PAPI
@Deprecated
@Test
public void testPrefixScanInMemoryStoreWithCachingWithLoggingOldProcessor() {
    final StoreBuilder<KeyValueStore<String, String>> storeBuilder =
        Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String())
            .withCachingEnabled()
            .withLoggingEnabled(Collections.emptyMap());
    topology
        .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1)
        .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1")
        .addSink("counts", OUTPUT_TOPIC_1, "processor1");
    driver = new TopologyTestDriver(topology, props);
    final TestInputTopic<String, String> inputTopic =
        driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER);
    final TestOutputTopic<Integer, String> outputTopic1 =
        driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer());
    inputTopic.pipeInput("key1", "value1");
    inputTopic.pipeInput("key2", "value2");
    inputTopic.pipeInput("key3", "value3");
    inputTopic.pipeInput("key1", "value4");
    assertTrue(outputTopic1.isEmpty());
    final KeyValueStore<String, String> store = driver.getKeyValueStore(DEFAULT_STORE_NAME);
    final List<KeyValue<String, String>> results = prefixScanResults(store, DEFAULT_PREFIX);
    assertEquals("key1", results.get(0).key);
    assertEquals("value4", results.get(0).value);
    assertEquals("key2", results.get(1).key);
    assertEquals("value2", results.get(1).value);
    assertEquals("key3", results.get(2).key);
    assertEquals("value3", results.get(2).value);
}
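The prefixScanResults helper used above is not part of this snippet. A minimal sketch of what it might look like (an assumption, not the exact ProcessorTopologyTest implementation) is to drain the iterator returned by KeyValueStore#prefixScan into a list:

// Hypothetical helper: collects the results of a prefix scan into a list, in key order.
// Assumes String keys serialized with the default StringSerializer.
private static List<KeyValue<String, String>> prefixScanResults(final KeyValueStore<String, String> store,
                                                                final String prefix) {
    final List<KeyValue<String, String>> results = new ArrayList<>();
    try (final KeyValueIterator<String, String> iterator = store.prefixScan(prefix, new StringSerializer())) {
        while (iterator.hasNext()) {
            results.add(iterator.next());
        }
    }
    return results;
}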
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
From the class QueryableStateIntegrationTest, method shouldBeAbleToQueryMapValuesState:
@Test
public void shouldBeAbleToQueryMapValuesState() throws Exception {
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] keys = { "hello", "goodbye", "welcome", "go", "kafka" };
    final Set<KeyValue<String, String>> batch1 = new HashSet<>(Arrays.asList(
        new KeyValue<>(keys[0], "1"),
        new KeyValue<>(keys[1], "1"),
        new KeyValue<>(keys[2], "3"),
        new KeyValue<>(keys[3], "5"),
        new KeyValue<>(keys[4], "2")));
    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        batch1,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);
    final KTable<String, String> t1 = builder.table(streamOne);
    // materialize the mapValues result as "queryMapValues" so it can be queried interactively
    t1.mapValues(
            (ValueMapper<String, Long>) Long::valueOf,
            Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("queryMapValues").withValueSerde(Serdes.Long()))
        .toStream()
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    startKafkaStreamsAndWaitForRunningState(kafkaStreams);
    waitUntilAtLeastNumRecordProcessed(outputTopic, 5);
    final ReadOnlyKeyValueStore<String, Long> myMapStore =
        IntegrationTestUtils.getStore("queryMapValues", kafkaStreams, keyValueStore());
    for (final KeyValue<String, String> batchEntry : batch1) {
        assertEquals(Long.valueOf(batchEntry.value), myMapStore.get(batchEntry.key));
    }
    // range query over the store, printing each entry between "hello" and "kafka"
    try (final KeyValueIterator<String, Long> range = myMapStore.range("hello", "kafka")) {
        while (range.hasNext()) {
            System.out.println(range.next());
        }
    }
}
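IntegrationTestUtils.getStore is a test helper that retries until the store becomes queryable. A minimal direct equivalent using the public interactive-query API is sketched below (an assumption, valid only once the instance has reached RUNNING; it relies on StoreQueryParameters and QueryableStoreTypes from org.apache.kafka.streams):

// Query the "queryMapValues" store directly; this throws if the store is not yet queryable.
final ReadOnlyKeyValueStore<String, Long> mapValuesStore = kafkaStreams.store(
    StoreQueryParameters.fromNameAndType("queryMapValues", QueryableStoreTypes.keyValueStore()));
// "hello" was produced with value "1", so the mapped store should return 1L
assertEquals(Long.valueOf(1L), mapValuesStore.get("hello"));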
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
From the class QueryableStateIntegrationTest, method shouldBeAbleToQueryMapValuesAfterFilterState:
@Test
public void shouldBeAbleToQueryMapValuesAfterFilterState() throws Exception {
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] keys = { "hello", "goodbye", "welcome", "go", "kafka" };
    final Set<KeyValue<String, String>> batch1 = new HashSet<>(Arrays.asList(
        new KeyValue<>(keys[0], "1"),
        new KeyValue<>(keys[1], "1"),
        new KeyValue<>(keys[2], "3"),
        new KeyValue<>(keys[3], "5"),
        new KeyValue<>(keys[4], "2")));
    // only the "kafka" record passes the filter below, so it is the only expected entry
    final Set<KeyValue<String, Long>> expectedBatch1 = new HashSet<>(Collections.singleton(new KeyValue<>(keys[4], 2L)));
    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        batch1,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);
    final Predicate<String, String> filterPredicate = (key, value) -> key.contains("kafka");
    final KTable<String, String> t1 = builder.table(streamOne);
    final KTable<String, String> t2 = t1.filter(filterPredicate, Materialized.as("queryFilter"));
    final KTable<String, Long> t3 = t2.mapValues(
        (ValueMapper<String, Long>) Long::valueOf,
        Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("queryMapValues").withValueSerde(Serdes.Long()));
    t3.toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    startKafkaStreamsAndWaitForRunningState(kafkaStreams);
    waitUntilAtLeastNumRecordProcessed(outputTopic, 1);
    final ReadOnlyKeyValueStore<String, Long> myMapStore =
        IntegrationTestUtils.getStore("queryMapValues", kafkaStreams, keyValueStore());
    for (final KeyValue<String, Long> expectedEntry : expectedBatch1) {
        assertEquals(expectedEntry.value, myMapStore.get(expectedEntry.key));
    }
    // keys that were filtered out must not be present in the downstream mapValues store
    for (final KeyValue<String, String> batchEntry : batch1) {
        final KeyValue<String, Long> batchEntryMapValue = new KeyValue<>(batchEntry.key, Long.valueOf(batchEntry.value));
        if (!expectedBatch1.contains(batchEntryMapValue)) {
            assertNull(myMapStore.get(batchEntry.key));
        }
    }
}
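The intermediate filter result is materialized as "queryFilter", so it can be queried the same way. A short sketch of such a follow-up query (not part of the test above, assuming the same helper and that the instance is RUNNING):

// Hypothetical query against the materialized filter store, which still holds String values
final ReadOnlyKeyValueStore<String, String> filterStore =
    IntegrationTestUtils.getStore("queryFilter", kafkaStreams, keyValueStore());
assertEquals("2", filterStore.get("kafka")); // the only key matching the predicate
assertNull(filterStore.get("hello"));        // filtered out, so absent from the store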
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
From the class StateDirectoryIntegrationTest, method testNotCleanUpStateDirIfNotEmpty:
@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create topology with a materialized table backed by a KeyValueStore
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State store directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create a StateListener that signals the RUNNING and NOT_RUNNING transitions
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate that the application state directory is created
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());
        // Drop an extra file into the application directory so that it is not empty
        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }
        // Close Streams and run cleanUp(); the non-empty state directory must not be deleted
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't shut down in time.", e);
        }
        streams.cleanUp();
        // Root state directory still exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory still exists
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
From the class RestoreIntegrationTest, method shouldProcessDataFromStoresWithLoggingDisabled:
@Test
public void shouldProcessDataFromStoresWithLoggingDisabled() throws InterruptedException {
    IntegrationTestUtils.produceKeyValuesSynchronously(
        inputStream,
        asList(KeyValue.pair(1, 1), KeyValue.pair(2, 2), KeyValue.pair(3, 3)),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, IntegerSerializer.class),
        CLUSTER.time);
    // build an LRU-map based KeyValueStore with changelogging disabled
    final KeyValueBytesStoreSupplier lruMapSupplier = Stores.lruMap(inputStream, 10);
    final StoreBuilder<KeyValueStore<Integer, Integer>> storeBuilder =
        new KeyValueStoreBuilder<>(lruMapSupplier, Serdes.Integer(), Serdes.Integer(), CLUSTER.time)
            .withLoggingDisabled();
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.addStateStore(storeBuilder);
    final KStream<Integer, Integer> stream = streamsBuilder.stream(inputStream);
    final CountDownLatch processorLatch = new CountDownLatch(3);
    stream.process(() -> new KeyValueStoreProcessor(inputStream, processorLatch), inputStream);
    final Topology topology = streamsBuilder.build();
    kafkaStreams = new KafkaStreams(topology, props());
    // count down once the instance transitions from REBALANCING to RUNNING
    final CountDownLatch latch = new CountDownLatch(1);
    kafkaStreams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            latch.countDown();
        }
    });
    kafkaStreams.start();
    latch.await(30, TimeUnit.SECONDS);
    assertTrue(processorLatch.await(30, TimeUnit.SECONDS));
}
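The KeyValueStoreProcessor used above is not shown in this snippet. A minimal sketch of what it might look like (an assumption, using the current org.apache.kafka.streams.processor.api.Processor interface rather than RestoreIntegrationTest's exact code): it writes each record into the connected store and counts the latch down so the test can tell when all three records have been processed.

// Hypothetical processor: puts each record into the named KeyValueStore and signals the latch.
// Assumes imports of Processor, ProcessorContext, and Record from org.apache.kafka.streams.processor.api.
private static class KeyValueStoreProcessor implements Processor<Integer, Integer, Void, Void> {

    private final String storeName;          // the test reuses the input topic name as the store name
    private final CountDownLatch processorLatch;
    private KeyValueStore<Integer, Integer> store;

    KeyValueStoreProcessor(final String storeName, final CountDownLatch processorLatch) {
        this.storeName = storeName;
        this.processorLatch = processorLatch;
    }

    @Override
    public void init(final ProcessorContext<Void, Void> context) {
        this.store = context.getStateStore(storeName);
    }

    @Override
    public void process(final Record<Integer, Integer> record) {
        if (record.key() != null) {
            store.put(record.key(), record.value());
            processorLatch.countDown();
        }
    }
}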