Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka by apache.
Class KStreamsFineGrainedAutoResetIntegrationTest, method shouldThrowStreamsExceptionNoResetSpecified.
@Test
public void shouldThrowStreamsExceptionNoResetSpecified() throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
    Properties localConfig = StreamsTestUtils.getStreamsConfig(
        "testAutoOffsetWithNone",
        CLUSTER.bootstrapServers(),
        STRING_SERDE_CLASSNAME,
        STRING_SERDE_CLASSNAME,
        props);

    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<String, String> exceptionStream = builder.stream(NOOP);
    exceptionStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    KafkaStreams streams = new KafkaStreams(builder, localConfig);

    final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler();
    final TestCondition correctExceptionThrownCondition = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return uncaughtExceptionHandler.correctExceptionThrown;
        }
    };

    streams.setUncaughtExceptionHandler(uncaughtExceptionHandler);
    streams.start();
    TestUtils.waitForCondition(correctExceptionThrownCondition, "The expected NoOffsetForPartitionException was never thrown");
    streams.close();
}
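The TestingUncaughtExceptionHandler referenced above is a private helper of the test class and is not part of this snippet. A minimal sketch of what such a handler could look like, assuming it merely records whether the uncaught exception chain contains the expected NoOffsetForPartitionException; the assertion details here are assumptions, not the actual Kafka test code:

    // Hypothetical sketch of the helper used above; the real implementation may differ.
    private static final class TestingUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
        // Read by the TestCondition in the test once the handler has seen the expected failure.
        volatile boolean correctExceptionThrown = false;

        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            // With auto.offset.reset=none and no committed offsets, the consumer raises a
            // NoOffsetForPartitionException, which the stream thread is assumed to wrap.
            correctExceptionThrown = e.getCause() instanceof NoOffsetForPartitionException;
        }
    }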
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka by apache.
Class KStreamsFineGrainedAutoResetIntegrationTest, method shouldThrowExceptionOverlappingTopic.
@Test(expected = TopologyBuilderException.class)
public void shouldThrowExceptionOverlappingTopic() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    // NOTE: this would realistically get caught when building the topology; the test is for completeness
    final KStream<String, String> pattern1Stream = builder.stream(KStreamBuilder.AutoOffsetReset.EARLIEST, Pattern.compile("topic-[A-D]"));
    final KStream<String, String> pattern2Stream = builder.stream(KStreamBuilder.AutoOffsetReset.LATEST, Pattern.compile("topic-\\d"));
    final KStream<String, String> namedTopicsStream = builder.stream(KStreamBuilder.AutoOffsetReset.LATEST, TOPIC_A, TOPIC_Z);
    builder.latestResetTopicsPattern();
}
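For contrast, a sketch of a subscription layout that does not overlap and so would not trigger a TopologyBuilderException; the topic names here are invented for illustration and use only the same KStreamBuilder.stream overloads shown above:

    // Hypothetical non-overlapping setup: each source claims a disjoint set of topics.
    final KStreamBuilder okBuilder = new KStreamBuilder();
    final KStream<String, String> lettersStream =
        okBuilder.stream(KStreamBuilder.AutoOffsetReset.EARLIEST, Pattern.compile("letters-[A-D]"));
    final KStream<String, String> numbersStream =
        okBuilder.stream(KStreamBuilder.AutoOffsetReset.LATEST, Pattern.compile("numbers-\\d+"));
    // A named topic that neither pattern above can match.
    final KStream<String, String> auditStream =
        okBuilder.stream(KStreamBuilder.AutoOffsetReset.LATEST, "audit-log");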
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka by apache.
Class QueryableStateIntegrationTest, method shouldBeAbleToQueryState.
@Test
public void shouldBeAbleToQueryState() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    final String[] keys = { "hello", "goodbye", "welcome", "go", "kafka" };

    final Set<KeyValue<String, String>> batch1 = new TreeSet<>(stringComparator);
    batch1.addAll(Arrays.asList(
        new KeyValue<>(keys[0], "hello"),
        new KeyValue<>(keys[1], "goodbye"),
        new KeyValue<>(keys[2], "welcome"),
        new KeyValue<>(keys[3], "go"),
        new KeyValue<>(keys[4], "kafka")));

    final Set<KeyValue<String, Long>> expectedCount = new TreeSet<>(stringLongComparator);
    for (final String key : keys) {
        expectedCount.add(new KeyValue<>(key, 1L));
    }

    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamOne,
        batch1,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);

    final KStream<String, String> s1 = builder.stream(streamOne);

    // Non-windowed count
    s1.groupByKey().count("my-count").to(Serdes.String(), Serdes.Long(), outputTopic);

    // Windowed count
    s1.groupByKey().count(TimeWindows.of(WINDOW_SIZE), "windowed-count");

    kafkaStreams = new KafkaStreams(builder, streamsConfiguration);
    kafkaStreams.start();
    waitUntilAtLeastNumRecordProcessed(outputTopic, 1);

    final ReadOnlyKeyValueStore<String, Long> myCount =
        kafkaStreams.store("my-count", QueryableStoreTypes.<String, Long>keyValueStore());
    final ReadOnlyWindowStore<String, Long> windowStore =
        kafkaStreams.store("windowed-count", QueryableStoreTypes.<String, Long>windowStore());

    verifyCanGetByKey(keys, expectedCount, expectedCount, windowStore, myCount);
    verifyRangeAndAll(expectedCount, myCount);
}
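verifyCanGetByKey and verifyRangeAndAll are helpers of QueryableStateIntegrationTest and are not shown here. A rough sketch of the kind of per-key check verifyCanGetByKey could perform, assuming the window store is queried over a time range up to the present; the time bounds and assertion style are assumptions:

    // Hypothetical sketch: query each expected key from both stores and compare counts.
    for (final KeyValue<String, Long> expected : expectedCount) {
        // Key-value store lookup: one total count per key.
        assertEquals(expected.value, myCount.get(expected.key));

        // Window store lookup: fetch all windows for the key in a time range
        // and check that at least one windowed count is present.
        try (final WindowStoreIterator<Long> windows =
                 windowStore.fetch(expected.key, 0L, System.currentTimeMillis())) {
            assertTrue(windows.hasNext());
        }
    }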
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka by apache.
Class QueryableStateIntegrationTest, method shouldNotMakeStoreAvailableUntilAllStoresAvailable.
@Test
public void shouldNotMakeStoreAvailableUntilAllStoresAvailable() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<String, String> stream = builder.stream(streamThree);
    final String storeName = "count-by-key";
    stream.groupByKey().count(storeName);

    kafkaStreams = new KafkaStreams(builder, streamsConfiguration);
    kafkaStreams.start();

    final KeyValue<String, String> hello = KeyValue.pair("hello", "hello");
    IntegrationTestUtils.produceKeyValuesSynchronously(
        streamThree,
        Arrays.asList(hello, hello, hello, hello, hello, hello, hello, hello),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()),
        mockTime);

    final int maxWaitMs = 30000;
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            try {
                kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore());
                return true;
            } catch (InvalidStateStoreException ise) {
                return false;
            }
        }
    }, maxWaitMs, "waiting for store " + storeName);

    final ReadOnlyKeyValueStore<String, Long> store =
        kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore());

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return Long.valueOf(8L).equals(store.get("hello"));
        }
    }, maxWaitMs, "wait for count to be 8");

    // close stream
    kafkaStreams.close();

    // start again
    kafkaStreams = new KafkaStreams(builder, streamsConfiguration);
    kafkaStreams.start();

    // make sure we never get any value other than 8 for hello
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            try {
                assertEquals(Long.valueOf(8L), kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore()).get("hello"));
                return true;
            } catch (InvalidStateStoreException ise) {
                return false;
            }
        }
    }, maxWaitMs, "waiting for store " + storeName);
}
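Since TestCondition appears from its usage here to be a single-method interface, on Java 8+ the anonymous classes above could be written as lambdas. A sketch of the first store-availability wait rewritten that way, assuming TestCondition is indeed a functional interface:

    // Hypothetical lambda form of the store-availability wait above.
    TestUtils.waitForCondition(() -> {
        try {
            kafkaStreams.store(storeName, QueryableStoreTypes.<String, Long>keyValueStore());
            return true; // store is queryable
        } catch (final InvalidStateStoreException ise) {
            return false; // not all stores are available yet; keep polling
        }
    }, maxWaitMs, "waiting for store " + storeName);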
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka by apache.
Class RegexSourceIntegrationTest, method testNoMessagesSentExceptionFromOverlappingPatterns.
// TODO: should be updated to expected = TopologyBuilderException after KAFKA-3708
@Test(expected = AssertionError.class)
public void testNoMessagesSentExceptionFromOverlappingPatterns() throws Exception {
    final String fooMessage = "fooMessage";
    final String fMessage = "fMessage";
    final Serde<String> stringSerde = Serdes.String();
    final KStreamBuilder builder = new KStreamBuilder();

    // Overlapping patterns here; no messages should be sent, as a TopologyBuilderException
    // will be thrown when the processor topology is built.
    final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("foo.*"));
    final KStream<String, String> pattern2Stream = builder.stream(Pattern.compile("f.*"));
    pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    pattern2Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();

    final Properties producerConfig = TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class);
    IntegrationTestUtils.produceValuesSynchronously(FA_TOPIC, Arrays.asList(fMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(FOO_TOPIC, Arrays.asList(fooMessage), producerConfig, mockTime);

    final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class);
    try {
        IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, DEFAULT_OUTPUT_TOPIC, 2, 5000);
        fail("Should not get here");
    } finally {
        streams.close();
    }
}
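The test leans on waitUntilMinKeyValueRecordsReceived timing out and throwing an AssertionError, since the overlapping patterns prevent any records from reaching DEFAULT_OUTPUT_TOPIC. A sketch of how such a helper could be implemented with a plain consumer; this is an assumption about its behavior, not the actual IntegrationTestUtils code, and it presumes the usual org.apache.kafka.clients.consumer and java.util imports:

    // Hypothetical polling helper: consume until minRecords arrive or fail after waitMs.
    static List<KeyValue<String, String>> waitUntilMinRecords(final Properties consumerConfig,
                                                              final String topic,
                                                              final int minRecords,
                                                              final long waitMs) {
        final List<KeyValue<String, String>> received = new ArrayList<>();
        try (final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerConfig)) {
            consumer.subscribe(Collections.singletonList(topic));
            final long deadline = System.currentTimeMillis() + waitMs;
            while (System.currentTimeMillis() < deadline && received.size() < minRecords) {
                for (final ConsumerRecord<String, String> record : consumer.poll(100)) {
                    received.add(new KeyValue<>(record.key(), record.value()));
                }
            }
        }
        if (received.size() < minRecords) {
            // Timing out here is what produces the AssertionError the test expects.
            throw new AssertionError("Expected " + minRecords + " records but received " + received.size());
        }
        return received;
    }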