Use of org.apache.kafka.streams.KeyValue in project kafka-streams-examples by confluentinc.
From class EmailServiceTest, method shouldSendEmailWithValidContents:
@Test
public void shouldSendEmailWithValidContents() throws Exception {
  // Given one order, customer and payment
  String orderId = id(0L);
  Order order = new Order(orderId, 15L, CREATED, UNDERPANTS, 3, 5.00d);
  Customer customer = new Customer(15L, "Franz", "Kafka", "frans@thedarkside.net", "oppression street, prague, cze");
  Payment payment = new Payment("Payment:1234", orderId, "CZK", 1000.00d);
  emailService = new EmailService(details -> {
    assertThat(details.customer).isEqualTo(customer);
    assertThat(details.payment).isEqualTo(payment);
    assertThat(details.order).isEqualTo(order);
    complete = true;
  });
  send(Topics.CUSTOMERS, Collections.singleton(new KeyValue<>(customer.getId(), customer)));
  send(Topics.ORDERS, Collections.singleton(new KeyValue<>(order.getId(), order)));
  send(Topics.PAYMENTS, Collections.singleton(new KeyValue<>(payment.getId(), payment)));
  // When
  emailService.start(CLUSTER.bootstrapServers());
  // Then
  TestUtils.waitForCondition(() -> complete, "Email was never sent.");
}
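The send(...) helper used above is not shown on this page. A minimal, hypothetical sketch of such a helper follows; it assumes a Schemas.Topic<K, V> type that exposes name(), keySerde() and valueSerde(), which may not match the real MicroserviceTestUtils implementation.

// Hypothetical sketch of a send(...) helper: produce the given records synchronously to the topic.
// Assumes a Schemas.Topic<K, V> type exposing name(), keySerde() and valueSerde().
private static <K, V> void send(Schemas.Topic<K, V> topic, Collection<KeyValue<K, V>> records) throws Exception {
  Properties producerConfig = new Properties();
  producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
  try (KafkaProducer<K, V> producer =
           new KafkaProducer<>(producerConfig, topic.keySerde().serializer(), topic.valueSerde().serializer())) {
    for (KeyValue<K, V> record : records) {
      // Block on each send so the records are on the broker before the service starts.
      producer.send(new ProducerRecord<>(topic.name(), record.key, record.value)).get();
    }
  }
}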
Use of org.apache.kafka.streams.KeyValue in project kafka-streams-examples by confluentinc.
From class MicroserviceTestUtils, method readKeysAndValues:
private static <K, V> List<KeyValue<K, V>> readKeysAndValues(int numberToRead,
                                                              String bootstrapServers,
                                                              Deserializer<K> keyDes,
                                                              Deserializer<V> valDes,
                                                              String topicName) throws InterruptedException {
  Properties consumerConfig = new Properties();
  consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "Test-Reader-" + consumerCounter++);
  consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  KafkaConsumer<K, V> consumer = new KafkaConsumer<>(consumerConfig, keyDes, valDes);
  consumer.subscribe(singletonList(topicName));
  List<KeyValue<K, V>> actualValues = new ArrayList<>();
  TestUtils.waitForCondition(() -> {
    ConsumerRecords<K, V> records = consumer.poll(100);
    for (ConsumerRecord<K, V> record : records) {
      actualValues.add(KeyValue.pair(record.key(), record.value()));
    }
    return actualValues.size() == numberToRead;
  }, 20000, "Timed out reading orders.");
  consumer.close();
  return actualValues;
}
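A hypothetical call site for this helper, with the topic name and deserializers chosen for illustration only:

// Hypothetical usage from within the same test-utils class: read the first five String/String
// records from a topic named "payments-json".
List<KeyValue<String, String>> firstFive = readKeysAndValues(
    5,
    CLUSTER.bootstrapServers(),
    new StringDeserializer(),
    new StringDeserializer(),
    "payments-json");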
Use of org.apache.kafka.streams.KeyValue in project kafka-streams-examples by confluentinc.
From class StateStoresInTheDSLIntegrationTest, method shouldAllowStateStoreAccessFromDSL:
@Test
public void shouldAllowStateStoreAccessFromDSL() throws Exception {
  List<String> inputValues = Arrays.asList("foo", "bar", "foo", "quux", "bar", "foo");
  List<KeyValue<String, Long>> expectedRecords = Arrays.asList(
      new KeyValue<>("foo", 1L),
      new KeyValue<>("bar", 1L),
      new KeyValue<>("foo", 2L),
      new KeyValue<>("quux", 1L),
      new KeyValue<>("bar", 2L),
      new KeyValue<>("foo", 3L));
  //
  // Step 1: Configure and start the processor topology.
  //
  StreamsBuilder builder = new StreamsBuilder();
  Properties streamsConfiguration = new Properties();
  streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "state-store-dsl-lambda-integration-test");
  streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
  streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
  streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // Use a temporary directory for storing state, which will be automatically removed after the test.
  streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
  // Create a state store manually.
  StoreBuilder<KeyValueStore<String, Long>> wordCountsStore = Stores
      .keyValueStoreBuilder(Stores.persistentKeyValueStore("WordCountsStore"), Serdes.String(), Serdes.Long())
      .withCachingEnabled();
  // Important (1 of 2): You must add the state store to the topology, otherwise your application
  // will fail at run-time (because the state store is referred to in `transform()` below).
  builder.addStateStore(wordCountsStore);
  // Read the input data. (In this example we ignore whatever is stored in the record keys.)
  KStream<byte[], String> words = builder.stream(inputTopic);
  // Important (2 of 2): When we call `transform()` we must provide the name of the state store
  // that is going to be used by the `Transformer` returned by `WordCountTransformerSupplier` as
  // the second parameter of `transform()` (note: we are also passing the state store name to the
  // constructor of `WordCountTransformerSupplier`, which we do primarily for cleaner code).
  // Otherwise our application will fail at run-time when attempting to operate on the state store
  // (within the transformer) because `ProcessorContext#getStateStore("WordCountsStore")` will
  // return `null`.
  KStream<String, Long> wordCounts =
      words.transform(new WordCountTransformerSupplier(wordCountsStore.name()), wordCountsStore.name());
  wordCounts.to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
  KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
  streams.start();
  //
  // Step 2: Produce some input data to the input topic.
  //
  Properties producerConfig = new Properties();
  producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
  producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
  producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
  producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  IntegrationTestUtils.produceValuesSynchronously(inputTopic, inputValues, producerConfig);
  //
  // Step 3: Verify the application's output data.
  //
  Properties consumerConfig = new Properties();
  consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "state-store-dsl-lambda-integration-test-standard-consumer");
  consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
  List<KeyValue<String, Long>> actualValues =
      IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedRecords.size());
  streams.close();
  assertThat(actualValues).isEqualTo(expectedRecords);
}
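The WordCountTransformerSupplier referenced above is not shown on this page. A minimal sketch of a transformer that would produce the expected output (a running count per word, keyed by word) is below; it is an assumption about the real class, which may differ in detail, but it illustrates why the state store name must be attached via transform().

// Hedged sketch of a word-count TransformerSupplier backed by the "WordCountsStore" state store.
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.kstream.TransformerSupplier;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

public class WordCountTransformerSupplier implements TransformerSupplier<byte[], String, KeyValue<String, Long>> {

  private final String stateStoreName;

  public WordCountTransformerSupplier(final String stateStoreName) {
    this.stateStoreName = stateStoreName;
  }

  @Override
  public Transformer<byte[], String, KeyValue<String, Long>> get() {
    return new Transformer<byte[], String, KeyValue<String, Long>>() {

      private KeyValueStore<String, Long> stateStore;

      @SuppressWarnings("unchecked")
      @Override
      public void init(final ProcessorContext context) {
        // This lookup returns null unless the store was attached to the transformer via
        // `transform(supplier, stateStoreName)` as explained in the test above.
        stateStore = (KeyValueStore<String, Long>) context.getStateStore(stateStoreName);
      }

      @Override
      public KeyValue<String, Long> transform(final byte[] key, final String value) {
        // Increment the running count for this word and emit the updated count, keyed by the word.
        final Long previousCount = stateStore.get(value);
        final long newCount = (previousCount == null) ? 1L : previousCount + 1L;
        stateStore.put(value, newCount);
        return KeyValue.pair(value, newCount);
      }

      @Override
      public void close() {
        // The state store is managed by Kafka Streams; nothing to clean up here.
      }
    };
  }
}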
Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
From class KStreamPeekTest, method shouldObserveStreamElements:
@Test
public void shouldObserveStreamElements() {
  final StreamsBuilder builder = new StreamsBuilder();
  final KStream<Integer, String> stream = builder.stream(topicName, Consumed.with(Serdes.Integer(), Serdes.String()));
  final List<KeyValue<Integer, String>> peekObserved = new ArrayList<>(), streamObserved = new ArrayList<>();
  stream.peek(collect(peekObserved)).foreach(collect(streamObserved));
  try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
    final TestInputTopic<Integer, String> inputTopic =
        driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer());
    final List<KeyValue<Integer, String>> expected = new ArrayList<>();
    for (int key = 0; key < 32; key++) {
      final String value = "V" + key;
      inputTopic.pipeInput(key, value);
      expected.add(new KeyValue<>(key, value));
    }
    assertEquals(expected, peekObserved);
    assertEquals(expected, streamObserved);
  }
}
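The collect(...) helper used in both peek() and foreach() above simply records every key-value pair it sees. A plausible definition, hedged as an assumption about the rest of the test class, is:

// Assumed shape of the collect(...) helper: a ForeachAction that appends each record to the given list.
private static <K, V> ForeachAction<K, V> collect(final List<KeyValue<K, V>> into) {
  return (key, value) -> into.add(new KeyValue<>(key, value));
}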
Use of org.apache.kafka.streams.KeyValue in project kafka by apache.
From class CogroupedKStreamImplTest, method shouldInsertRepartitionsTopicForCogroupsUsedTwice:
@Test
public void shouldInsertRepartitionsTopicForCogroupsUsedTwice() {
  final StreamsBuilder builder = new StreamsBuilder();
  final Properties properties = new Properties();
  final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
  final KGroupedStream<String, String> groupedOne =
      stream1.map((k, v) -> new KeyValue<>(v, k)).groupByKey(Grouped.as("foo"));
  final CogroupedKStream<String, String> one = groupedOne.cogroup(STRING_AGGREGATOR);
  one.aggregate(STRING_INITIALIZER);
  one.aggregate(STRING_INITIALIZER);
  final String topologyDescription = builder.build(properties).describe().toString();
  assertThat(topologyDescription, equalTo(
      "Topologies:\n"
          + " Sub-topology: 0\n"
          + " Source: KSTREAM-SOURCE-0000000000 (topics: [one])\n"
          + " --> KSTREAM-MAP-0000000001\n"
          + " Processor: KSTREAM-MAP-0000000001 (stores: [])\n"
          + " --> foo-repartition-filter\n"
          + " <-- KSTREAM-SOURCE-0000000000\n"
          + " Processor: foo-repartition-filter (stores: [])\n"
          + " --> foo-repartition-sink\n"
          + " <-- KSTREAM-MAP-0000000001\n"
          + " Sink: foo-repartition-sink (topic: foo-repartition)\n"
          + " <-- foo-repartition-filter\n\n"
          + " Sub-topology: 1\n"
          + " Source: foo-repartition-source (topics: [foo-repartition])\n"
          + " --> COGROUPKSTREAM-AGGREGATE-0000000006, COGROUPKSTREAM-AGGREGATE-0000000012\n"
          + " Processor: COGROUPKSTREAM-AGGREGATE-0000000006 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000002])\n"
          + " --> COGROUPKSTREAM-MERGE-0000000007\n"
          + " <-- foo-repartition-source\n"
          + " Processor: COGROUPKSTREAM-AGGREGATE-0000000012 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000008])\n"
          + " --> COGROUPKSTREAM-MERGE-0000000013\n"
          + " <-- foo-repartition-source\n"
          + " Processor: COGROUPKSTREAM-MERGE-0000000007 (stores: [])\n"
          + " --> none\n"
          + " <-- COGROUPKSTREAM-AGGREGATE-0000000006\n"
          + " Processor: COGROUPKSTREAM-MERGE-0000000013 (stores: [])\n"
          + " --> none\n"
          + " <-- COGROUPKSTREAM-AGGREGATE-0000000012\n\n"));
}
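The STRING_AGGREGATOR and STRING_INITIALIZER constants are declared elsewhere in the test class; a plausible minimal definition, hedged as an assumption, would be:

// Assumed definitions of the shared test fixtures: start from an empty string and concatenate values.
private static final Initializer<String> STRING_INITIALIZER = () -> "";
private static final Aggregator<String, String, String> STRING_AGGREGATOR =
    (key, value, aggregate) -> aggregate + value;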