Use of org.apache.kafka.streams.StreamsBuilder in project apache-kafka-on-k8s by banzaicloud.
The shouldThrowExceptionOverlappingTopic method of the KStreamsFineGrainedAutoResetIntegrationTest class.
@Test
public void shouldThrowExceptionOverlappingTopic() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    // NOTE: this would realistically get caught when building the topology; the test is for completeness
    builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(Topology.AutoOffsetReset.EARLIEST));
    try {
        builder.stream(Arrays.asList(TOPIC_A_1, TOPIC_Z_1), Consumed.with(Topology.AutoOffsetReset.LATEST));
        fail("Should have thrown TopologyException");
    } catch (final TopologyException expected) {
        // expected: the named subscription overlaps the earlier pattern subscription
    }
}
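The same check also fires outside of JUnit: StreamsBuilder rejects a named-topic subscription as soon as it overlaps an earlier pattern subscription on the same builder. A minimal standalone sketch of this behavior (topic names are placeholders; on a 1.x-era client Consumed lives in org.apache.kafka.streams, in later releases in org.apache.kafka.streams.kstream):

import java.util.Arrays;
import java.util.regex.Pattern;
import org.apache.kafka.streams.Consumed;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.errors.TopologyException;

public class OverlappingSubscriptionSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        // first subscription: a pattern covering topic-A_1 through topic-D_1
        builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(Topology.AutoOffsetReset.EARLIEST));
        try {
            // topic-A_1 is already matched by the pattern above, so adding it by name must fail
            builder.stream(Arrays.asList("topic-A_1", "topic-Z_1"), Consumed.with(Topology.AutoOffsetReset.LATEST));
        } catch (final TopologyException e) {
            System.out.println("rejected overlapping subscription: " + e.getMessage());
        }
    }
}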
Use of org.apache.kafka.streams.StreamsBuilder in project apache-kafka-on-k8s by banzaicloud.
The shouldOnlyReadForEarliest method of the KStreamsFineGrainedAutoResetIntegrationTest class.
private void shouldOnlyReadForEarliest(final String topicSuffix,
                                       final String topic1,
                                       final String topic2,
                                       final String topicA,
                                       final String topicC,
                                       final String topicY,
                                       final String topicZ,
                                       final String outputTopic,
                                       final List<String> expectedReceivedValues) throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();

    final KStream<String, String> pattern1Stream =
        builder.stream(Pattern.compile("topic-\\d" + topicSuffix), Consumed.<String, String>with(Topology.AutoOffsetReset.EARLIEST));
    final KStream<String, String> pattern2Stream =
        builder.stream(Pattern.compile("topic-[A-D]" + topicSuffix), Consumed.<String, String>with(Topology.AutoOffsetReset.LATEST));
    final KStream<String, String> namedTopicsStream = builder.stream(Arrays.asList(topicY, topicZ));

    pattern1Stream.to(stringSerde, stringSerde, outputTopic);
    pattern2Stream.to(stringSerde, stringSerde, outputTopic);
    namedTopicsStream.to(stringSerde, stringSerde, outputTopic);

    final Properties producerConfig = TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class);
    IntegrationTestUtils.produceValuesSynchronously(topic1, Collections.singletonList(topic1TestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topic2, Collections.singletonList(topic2TestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicA, Collections.singletonList(topicATestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicC, Collections.singletonList(topicCTestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicY, Collections.singletonList(topicYTestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicZ, Collections.singletonList(topicZTestMessage), producerConfig, mockTime);

    final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class);

    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
    streams.start();

    final List<KeyValue<String, String>> receivedKeyValues =
        IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedReceivedValues.size());
    final List<String> actualValues = new ArrayList<>(expectedReceivedValues.size());
    for (final KeyValue<String, String> receivedKeyValue : receivedKeyValues) {
        actualValues.add(receivedKeyValue.value);
    }
    streams.close();

    Collections.sort(actualValues);
    Collections.sort(expectedReceivedValues);
    assertThat(actualValues, equalTo(expectedReceivedValues));
}
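The feature under test is the per-source offset-reset override: each source may carry its own Topology.AutoOffsetReset via Consumed, while sources without one fall back to the application-wide auto.offset.reset config. A hedged sketch of the three variants (topic names are placeholders):

final StreamsBuilder builder = new StreamsBuilder();
// with no committed offsets, this source starts from the beginning of the topic
final KStream<String, String> fromEarliest =
    builder.stream("input-earliest", Consumed.<String, String>with(Topology.AutoOffsetReset.EARLIEST));
// with no committed offsets, this source starts at the end and sees only new records
final KStream<String, String> fromLatest =
    builder.stream("input-latest", Consumed.<String, String>with(Topology.AutoOffsetReset.LATEST));
// no override: this source uses whatever auto.offset.reset is configured for the application
final KStream<String, String> fromDefault = builder.stream("input-default");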
Use of org.apache.kafka.streams.StreamsBuilder in project apache-kafka-on-k8s by banzaicloud.
The shouldNotMakeStoreAvailableUntilAllStoresAvailable method of the QueryableStateIntegrationTest class.
@Test
public void shouldNotMakeStoreAvailableUntilAllStoresAvailable() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream(streamThree);

    final String storeName = "count-by-key";
    stream.groupByKey().count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as(storeName));

    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    kafkaStreams.start();

    final KeyValue<String, String> hello = KeyValue.pair("hello", "hello");
    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamThree,
        Arrays.asList(hello, hello, hello, hello, hello, hello, hello, hello),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);

    final int maxWaitMs = 30000;

    TestUtils.waitForCondition(new WaitForStore(storeName), maxWaitMs, "waiting for store " + storeName);

    final ReadOnlyKeyValueStore<String, Long> store =
        kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore());

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return Long.valueOf(8L).equals(store.get("hello"));
        }
    }, maxWaitMs, "wait for count to be 8");

    // close stream
    kafkaStreams.close();

    // start again
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    kafkaStreams.start();

    // make sure we never get any value other than 8 for hello
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            try {
                assertEquals(Long.valueOf(8L), kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore()).get("hello"));
                return true;
            } catch (final InvalidStateStoreException ise) {
                return false;
            }
        }
    }, maxWaitMs, "waiting for store " + storeName);
}
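WaitForStore is a helper defined elsewhere in this test class. Its role, presumably, is to poll kafkaStreams.store(...) until the store has finished restoring, since that call throws InvalidStateStoreException while the instance is still rebalancing or restoring state. A hypothetical re-creation, reusing the test's kafkaStreams field:

// hypothetical sketch of the WaitForStore condition: retry until the store is queryable
private class WaitForStore implements TestCondition {
    private final String storeName;

    WaitForStore(final String storeName) {
        this.storeName = storeName;
    }

    @Override
    public boolean conditionMet() {
        try {
            kafkaStreams.store(storeName, QueryableStoreTypes.keyValueStore());
            return true;
        } catch (final InvalidStateStoreException notYetQueryable) {
            return false; // waitForCondition will retry until its timeout
        }
    }
}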
Use of org.apache.kafka.streams.StreamsBuilder in project apache-kafka-on-k8s by banzaicloud.
The verifyCanQueryState method of the QueryableStateIntegrationTest class.
private void verifyCanQueryState(final int cacheSizeBytes) throws Exception {
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, cacheSizeBytes);
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] keys = {"hello", "goodbye", "welcome", "go", "kafka"};

    final Set<KeyValue<String, String>> batch1 = new TreeSet<>(stringComparator);
    batch1.addAll(Arrays.asList(
        new KeyValue<>(keys[0], "hello"),
        new KeyValue<>(keys[1], "goodbye"),
        new KeyValue<>(keys[2], "welcome"),
        new KeyValue<>(keys[3], "go"),
        new KeyValue<>(keys[4], "kafka")));

    final Set<KeyValue<String, Long>> expectedCount = new TreeSet<>(stringLongComparator);
    for (final String key : keys) {
        expectedCount.add(new KeyValue<>(key, 1L));
    }

    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        batch1,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);

    final KStream<String, String> s1 = builder.stream(streamOne);

    // non-windowed count
    final String storeName = "my-count";
    s1.groupByKey()
        .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as(storeName))
        .toStream()
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

    // windowed count
    final String windowStoreName = "windowed-count";
    s1.groupByKey()
        .windowedBy(TimeWindows.of(WINDOW_SIZE))
        .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as(windowStoreName));

    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    kafkaStreams.start();

    waitUntilAtLeastNumRecordProcessed(outputTopic, 1);

    final ReadOnlyKeyValueStore<String, Long> myCount =
        kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore());
    final ReadOnlyWindowStore<String, Long> windowStore =
        kafkaStreams.store(windowStoreName, QueryableStoreTypes.<String, Long>windowStore());

    verifyCanGetByKey(keys, expectedCount, expectedCount, windowStore, myCount);
    verifyRangeAndAll(expectedCount, myCount);
}
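verifyCanGetByKey and verifyRangeAndAll are helpers defined elsewhere in the test class; the store queries they rely on look roughly like this (a sketch assuming the 1.x fetch(key, timeFrom, timeTo) signature and the test's WINDOW_SIZE constant):

// point lookup on the key-value store
final Long count = myCount.get("hello");

// range scan over a key interval; the iterator must be closed to release resources
try (final KeyValueIterator<String, Long> range = myCount.range("go", "kafka")) {
    while (range.hasNext()) {
        final KeyValue<String, Long> entry = range.next();
        System.out.println(entry.key + " -> " + entry.value);
    }
}

// windowed lookup: every window for a key whose start falls in the given time range
final long now = System.currentTimeMillis();
try (final WindowStoreIterator<Long> windows = windowStore.fetch("hello", now - WINDOW_SIZE, now)) {
    while (windows.hasNext()) {
        final KeyValue<Long, Long> windowedCount = windows.next(); // key is the window start timestamp
        System.out.println(windowedCount.key + " -> " + windowedCount.value);
    }
}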
Use of org.apache.kafka.streams.StreamsBuilder in project apache-kafka-on-k8s by banzaicloud.
The shouldAllowToQueryAfterThreadDied method of the QueryableStateIntegrationTest class.
@Test
public void shouldAllowToQueryAfterThreadDied() throws Exception {
    final AtomicBoolean beforeFailure = new AtomicBoolean(true);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final String storeName = "store";

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> input = builder.stream(streamOne);
    input.groupByKey()
        .reduce(new Reducer<String>() {
            @Override
            public String apply(final String value1, final String value2) {
                // fail the processing thread exactly once, after the first aggregation step
                if (value1.length() > 1 && beforeFailure.compareAndSet(true, false)) {
                    throw new RuntimeException("Injected test exception");
                }
                return value1 + value2;
            }
        }, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName))
        .toStream()
        .to(outputTopic);

    streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration);
    kafkaStreams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            failed.set(true);
        }
    });
    kafkaStreams.start();

    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        Arrays.asList(KeyValue.pair("a", "1"), KeyValue.pair("a", "2"), KeyValue.pair("b", "3"), KeyValue.pair("b", "4")),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);

    final int maxWaitMs = 30000;

    TestUtils.waitForCondition(new WaitForStore(storeName), maxWaitMs, "waiting for store " + storeName);
    final ReadOnlyKeyValueStore<String, String> store =
        kafkaStreams.store(storeName, QueryableStoreTypes.<String, String>keyValueStore());

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return "12".equals(store.get("a")) && "34".equals(store.get("b"));
        }
    }, maxWaitMs, "wait for agg to be <a,12> and <b,34>");

    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        Collections.singleton(KeyValue.pair("a", "5")),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return failed.get();
        }
    }, maxWaitMs, "wait for thread to fail");

    TestUtils.waitForCondition(new WaitForStore(storeName), maxWaitMs, "waiting for store " + storeName);
    final ReadOnlyKeyValueStore<String, String> store2 =
        kafkaStreams.store(storeName, QueryableStoreTypes.<String, String>keyValueStore());

    try {
        TestUtils.waitForCondition(new TestCondition() {
            @Override
            public boolean conditionMet() {
                // the surviving thread may re-process some input after the failure,
                // so several aggregate values are acceptable
                return ("125".equals(store2.get("a")) || "1225".equals(store2.get("a")) || "12125".equals(store2.get("a")))
                    && ("34".equals(store2.get("b")) || "344".equals(store2.get("b")) || "3434".equals(store2.get("b")));
            }
        }, maxWaitMs, "wait for agg to be <a,125>||<a,1225>||<a,12125> and <b,34>||<b,344>||<b,3434>");
    } catch (final Throwable t) {
        throw new RuntimeException("Store content is a: " + store2.get("a") + "; b: " + store2.get("b"), t);
    }
}
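The client-side pattern this test exercises generalizes: because kafkaStreams.store(...) and subsequent gets can throw InvalidStateStoreException while tasks migrate to the surviving thread, interactive queries are typically wrapped in a retry loop. A hedged sketch of such a hypothetical helper:

// hypothetical helper: retry a point query across the rebalance caused by a dying thread
public static <V> V queryWithRetry(final KafkaStreams streams,
                                   final String storeName,
                                   final String key,
                                   final long timeoutMs) throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        try {
            final ReadOnlyKeyValueStore<String, V> store =
                streams.store(storeName, QueryableStoreTypes.<String, V>keyValueStore());
            return store.get(key);
        } catch (final InvalidStateStoreException migrating) {
            Thread.sleep(100L); // store is restoring or has moved; try again until the deadline
        }
    }
    throw new IllegalStateException("store " + storeName + " not queryable within " + timeoutMs + " ms");
}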