use of org.apache.kafka.streams.state.KeyValueStore in project apache-kafka-on-k8s by banzaicloud.
the class InMemoryLRUCacheStoreTest method createKeyValueStore.
@SuppressWarnings("unchecked")
@Override
protected <K, V> KeyValueStore<K, V> createKeyValueStore(final ProcessorContext context) {
final StoreBuilder storeBuilder = Stores.keyValueStoreBuilder(
    Stores.lruMap("my-store", 10),
    (Serde<K>) context.keySerde(),
    (Serde<V>) context.valueSerde());
final StateStore store = storeBuilder.build();
store.init(context, store);
return (KeyValueStore<K, V>) store;
}
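A minimal usage sketch (not part of the original test; `context` is assumed to be the mock ProcessorContext supplied by the test harness): the returned store behaves like any KeyValueStore, except that the underlying LRU map holds at most 10 entries and evicts the least-recently-used entry beyond that.
final KeyValueStore<String, String> store = createKeyValueStore(context);
store.put("a", "1"); // insert
final String value = store.get("a"); // read back: "1"
for (int i = 0; i < 20; i++) {
    store.put("key-" + i, String.valueOf(i)); // past maxSize=10, LRU entries are evicted
}
store.delete("a"); // explicit removal (the entry may already have been evicted)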
use of org.apache.kafka.streams.state.KeyValueStore in project kafka-streams-examples by confluentinc.
the class WordCountInteractiveQueriesExample method createStreams.
static KafkaStreams createStreams(final Properties streamsConfiguration) {
final Serde<String> stringSerde = Serdes.String();
StreamsBuilder builder = new StreamsBuilder();
KStream<String, String> textLines = builder.stream(TEXT_LINES_TOPIC, Consumed.with(Serdes.String(), Serdes.String()));
final KGroupedStream<String, String> groupedByWord = textLines
    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
    .groupBy((key, word) -> word, Serialized.with(stringSerde, stringSerde));
// Create a state store holding the all-time word count.
groupedByWord.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("word-count")
    .withValueSerde(Serdes.Long()));
// Create a windowed state store that contains the word count for every 1-minute window.
groupedByWord.windowedBy(TimeWindows.of(60000))
    .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("windowed-word-count")
        .withValueSerde(Serdes.Long()));
return new KafkaStreams(builder.build(), streamsConfiguration);
}
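The two stores registered above are what this example later queries interactively. A minimal lookup sketch (assumptions: the application has been started and has reached the RUNNING state; the method name lookupWordCount is hypothetical), using the pre-2.5 KafkaStreams#store(String, QueryableStoreType) API that matches this example's era:
static Long lookupWordCount(final KafkaStreams streams, final String word) {
    // Read-only view over the "word-count" store created in createStreams().
    final ReadOnlyKeyValueStore<String, Long> wordCounts =
        streams.store("word-count", QueryableStoreTypes.keyValueStore());
    return wordCounts.get(word); // null if the word has not been counted yet
}
The "windowed-word-count" store can be queried analogously with QueryableStoreTypes.windowStore().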
use of org.apache.kafka.streams.state.KeyValueStore in project kafka-streams-examples by confluentinc.
the class StateStoresInTheDSLIntegrationTest method shouldAllowStateStoreAccessFromDSL.
@Test
public void shouldAllowStateStoreAccessFromDSL() throws Exception {
List<String> inputValues = Arrays.asList("foo", "bar", "foo", "quux", "bar", "foo");
List<KeyValue<String, Long>> expectedRecords = Arrays.asList(
    new KeyValue<>("foo", 1L),
    new KeyValue<>("bar", 1L),
    new KeyValue<>("foo", 2L),
    new KeyValue<>("quux", 1L),
    new KeyValue<>("bar", 2L),
    new KeyValue<>("foo", 3L));
//
// Step 1: Configure and start the processor topology.
//
StreamsBuilder builder = new StreamsBuilder();
Properties streamsConfiguration = new Properties();
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "state-store-dsl-lambda-integration-test");
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// Use a temporary directory for storing state, which will be automatically removed after the test.
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
// Create a state store manually.
StoreBuilder<KeyValueStore<String, Long>> wordCountsStore = Stores
    .keyValueStoreBuilder(Stores.persistentKeyValueStore("WordCountsStore"), Serdes.String(), Serdes.Long())
    .withCachingEnabled();
// Important (1 of 2): You must add the state store to the topology, otherwise your application
// will fail at run-time (because the state store is referred to in `transform()` below).
builder.addStateStore(wordCountsStore);
// Read the input data. (In this example we ignore whatever is stored in the record keys.)
KStream<byte[], String> words = builder.stream(inputTopic);
// Important (2 of 2): When we call `transform()` we must provide the name of the state store
// that is going to be used by the `Transformer` returned by `WordCountTransformerSupplier` as
// the second parameter of `transform()` (note: we are also passing the state store name to the
// constructor of `WordCountTransformerSupplier`, which we do primarily for cleaner code).
// Otherwise our application will fail at run-time when attempting to operate on the state store
// (within the transformer) because `ProcessorContext#getStateStore("WordCountsStore")` will
// return `null`.
KStream<String, Long> wordCounts = words.transform(new WordCountTransformerSupplier(wordCountsStore.name()), wordCountsStore.name());
wordCounts.to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
//
// Step 2: Produce some input data to the input topic.
//
Properties producerConfig = new Properties();
producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
IntegrationTestUtils.produceValuesSynchronously(inputTopic, inputValues, producerConfig);
//
// Step 3: Verify the application's output data.
//
Properties consumerConfig = new Properties();
consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "state-store-dsl-lambda-integration-test-standard-consumer");
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
List<KeyValue<String, Long>> actualValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(
    consumerConfig, outputTopic, expectedRecords.size());
streams.close();
assertThat(actualValues).isEqualTo(expectedRecords);
}
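For context, a plausible sketch of the transformer this test wires in; the actual WordCountTransformerSupplier in kafka-streams-examples may differ in detail, and this assumes a Kafka Streams 2.x Transformer interface (init/transform/close). It shows why the store name matters: init() fetches the store by name from the ProcessorContext, and that lookup returns null unless the store name is also passed to transform().
static final class WordCountTransformerSupplier
    implements TransformerSupplier<byte[], String, KeyValue<String, Long>> {

  private final String stateStoreName;

  WordCountTransformerSupplier(final String stateStoreName) {
    this.stateStoreName = stateStoreName;
  }

  @Override
  public Transformer<byte[], String, KeyValue<String, Long>> get() {
    return new Transformer<byte[], String, KeyValue<String, Long>>() {

      private KeyValueStore<String, Long> stateStore;

      @SuppressWarnings("unchecked")
      @Override
      public void init(final ProcessorContext context) {
        // Fetch the store registered via builder.addStateStore(); this is the lookup
        // that returns null if the store name is not passed to transform().
        stateStore = (KeyValueStore<String, Long>) context.getStateStore(stateStoreName);
      }

      @Override
      public KeyValue<String, Long> transform(final byte[] key, final String word) {
        final Long previousCount = stateStore.get(word);
        final long newCount = (previousCount == null) ? 1L : previousCount + 1L;
        stateStore.put(word, newCount);
        return KeyValue.pair(word, newCount);
      }

      @Override
      public void close() {
        // Nothing to close; the store's lifecycle is managed by Kafka Streams.
      }
    };
  }
}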
use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
the class CogroupedKStreamImplTest method shouldAllowDifferentOutputTypeInCoGroup.
@Test
public void shouldAllowDifferentOutputTypeInCoGroup() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
final KStream<String, String> stream2 = builder.stream("two", stringConsumed);
final KGroupedStream<String, String> grouped1 = stream1.groupByKey();
final KGroupedStream<String, String> grouped2 = stream2.groupByKey();
final KTable<String, Integer> customers = grouped1
    .cogroup(STRING_SUM_AGGREGATOR)
    .cogroup(grouped2, STRING_SUM_AGGREGATOR)
    .aggregate(SUM_INITIALIZER,
        Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("store1")
            .withValueSerde(Serdes.Integer()));
customers.toStream().to(OUTPUT);
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
final TestInputTopic<String, String> testInputTopic = driver.createInputTopic("one", new StringSerializer(), new StringSerializer());
final TestInputTopic<String, String> testInputTopic2 = driver.createInputTopic("two", new StringSerializer(), new StringSerializer());
final TestOutputTopic<String, Integer> testOutputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new IntegerDeserializer());
testInputTopic.pipeInput("k1", "1", 0L);
testInputTopic.pipeInput("k2", "1", 1L);
testInputTopic.pipeInput("k1", "1", 10L);
testInputTopic.pipeInput("k2", "1", 100L);
testInputTopic2.pipeInput("k2", "2", 100L);
testInputTopic2.pipeInput("k2", "2", 200L);
testInputTopic2.pipeInput("k1", "2", 1L);
testInputTopic2.pipeInput("k2", "2", 500L);
testInputTopic2.pipeInput("k1", "2", 500L);
testInputTopic2.pipeInput("k2", "3", 500L);
testInputTopic2.pipeInput("k3", "2", 500L);
testInputTopic2.pipeInput("k2", "2", 100L);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", 1, 0);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", 1, 1);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", 2, 10);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", 2, 100);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", 4, 100);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", 6, 200);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", 4, 10);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", 8, 500);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", 6, 500);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", 11, 500);
}
}
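The fixtures SUM_INITIALIZER and STRING_SUM_AGGREGATOR are not shown in this excerpt. Plausible definitions consistent with the expected output above (a sketch; the actual constants in CogroupedKStreamImplTest may be written differently) parse each String value and sum it into an Integer aggregate, which is what makes the output type differ from the input type:
private static final Initializer<Integer> SUM_INITIALIZER = () -> 0;
private static final Aggregator<String, String, Integer> STRING_SUM_AGGREGATOR =
    (key, value, aggregate) -> aggregate + Integer.parseInt(value);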
use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
the class CogroupedKStreamImplTest method testCogroupKeyMixedAggregators.
@Test
public void testCogroupKeyMixedAggregators() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
final KStream<String, String> stream2 = builder.stream("two", stringConsumed);
final KGroupedStream<String, String> grouped1 = stream1.groupByKey();
final KGroupedStream<String, String> grouped2 = stream2.groupByKey();
final KTable<String, String> customers = grouped1
    .cogroup(MockAggregator.TOSTRING_REMOVER)
    .cogroup(grouped2, MockAggregator.TOSTRING_ADDER)
    .aggregate(MockInitializer.STRING_INIT,
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store1")
            .withValueSerde(Serdes.String()));
customers.toStream().to(OUTPUT);
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
final TestInputTopic<String, String> testInputTopic = driver.createInputTopic("one", new StringSerializer(), new StringSerializer());
final TestInputTopic<String, String> testInputTopic2 = driver.createInputTopic("two", new StringSerializer(), new StringSerializer());
final TestOutputTopic<String, String> testOutputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
testInputTopic.pipeInput("k1", "1", 0L);
testInputTopic.pipeInput("k2", "1", 1L);
testInputTopic.pipeInput("k1", "1", 10L);
testInputTopic.pipeInput("k2", "1", 100L);
testInputTopic2.pipeInput("k1", "2", 500L);
testInputTopic2.pipeInput("k2", "2", 500L);
testInputTopic2.pipeInput("k1", "2", 500L);
testInputTopic2.pipeInput("k2", "2", 100L);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0-1", 0);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0-1", 1);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0-1-1", 10);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0-1-1", 100);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0-1-1+2", 500L);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0-1-1+2", 500L);
assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0-1-1+2+2", 500L);
assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0-1-1+2+2", 500L);
}
}
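The expected values decode the mock fixtures used here: MockInitializer.STRING_INIT seeds each key with "0", MockAggregator.TOSTRING_REMOVER appends "-<value>", and MockAggregator.TOSTRING_ADDER appends "+<value>". Equivalent lambdas (a sketch of the behavior, not the actual mock classes):
final Initializer<String> stringInit = () -> "0";
final Aggregator<String, String, String> toStringRemover =
    (key, value, aggregate) -> aggregate + "-" + value;
final Aggregator<String, String, String> toStringAdder =
    (key, value, aggregate) -> aggregate + "+" + value;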