Use of org.apache.kafka.streams.Topology in project kafka by apache.
From class StateDirectoryIntegrationTest, method testCleanUpStateDirIfEmpty:
@Test
public void testCleanUpStateDirIfEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);

    // Create the input topic.
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())
    ));

    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records.
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));

        // Create a topology with a single materialized table.
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String())
        );
        final Topology topology = builder.build();

        // State store directory.
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();

        // Create the KafkaStreams instance.
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())
        ));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);

        // Create a StateListener that signals the RUNNING and NOT_RUNNING transitions.
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);

        // Application state directory.
        final File appDir = new File(stateDir, applicationId);

        // Validate that the application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }

        // The root state directory exists.
        assertTrue((new File(stateDir)).exists());
        // The application state directory exists.
        assertTrue(appDir.exists());

        // Validate that the state store directory is deleted.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't shut down in time.", e);
        }
        streams.cleanUp();

        // The root state directory still exists.
        assertTrue((new File(stateDir)).exists());
        // The assertion passes if one of the following holds:
        // case 1: the state directory was cleaned up without any problems;
        // case 2: the state directory was not cleaned up, because a task directory
        //         contains files but no checkpoint file;
        // case 3: the state directory was not cleaned up, because a task directory
        //         contains a checkpoint file, but it is empty.
        assertTrue(
            !appDir.exists()
                || Arrays.stream(appDir.listFiles()).anyMatch(
                    (File f) -> f.isDirectory()
                        && f.listFiles().length > 0
                        && !(new File(f, ".checkpoint")).exists())
                || Arrays.stream(appDir.listFiles()).anyMatch(
                    (File f) -> f.isDirectory()
                        && (new File(f, ".checkpoint")).length() == 0L)
        );
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
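The mkProperties(mkMap(mkEntry(...))) idiom above comes from Kafka's org.apache.kafka.common.utils.Utils test helpers and is simply a compact way to build a java.util.Properties. For readers without those helpers on the classpath, a minimal plain-Java equivalent of the producer config (the bootstrap address here is a placeholder; the test obtains it from the embedded cluster):

    final Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
    producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
    producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());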
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From class RepartitionTopicNamingTest, method shouldNotFailWithSameRepartitionTopicNameUsingSameKGroupedStreamOptimizationsOn:
@Test
public void shouldNotFailWithSameRepartitionTopicNameUsingSameKGroupedStreamOptimizationsOn() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KGroupedStream<String, String> kGroupedStream = builder.<String, String>stream("topic")
        .selectKey((k, v) -> k)
        .groupByKey(Grouped.as("grouping"));
    kGroupedStream.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMillis(10L))).count();
    kGroupedStream.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMillis(30L))).count();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
    final Topology topology = builder.build(properties);
    assertThat(getCountOfRepartitionTopicsFound(topology.describe().toString(), repartitionTopicPattern), is(1));
}
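With StreamsConfig.OPTIMIZE enabled, both windowed counts built from the same KGroupedStream share the single repartition topic named by Grouped.as("grouping"), which is why exactly one repartition topic is expected. The helper getCountOfRepartitionTopicsFound is not shown in this excerpt; a plausible sketch consistent with its call site, assuming repartitionTopicPattern is a java.util.regex.Pattern that matches repartition-topic names in the printed topology:

    private int getCountOfRepartitionTopicsFound(final String topologyString, final Pattern repartitionTopicPattern) {
        // Count every occurrence of a repartition-topic name in the topology description.
        final Matcher matcher = repartitionTopicPattern.matcher(topologyString);
        int count = 0;
        while (matcher.find()) {
            count++;
        }
        return count;
    }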
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From class ProcessorNodeTest, method testTopologyLevelClassCastException:
@Test
public void testTopologyLevelClassCastException() {
    // The default serdes are set to ByteArraySerde, which does not match the String-typed
    // topology, so processing the records triggers a topology-level ClassCastException.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.<String, String>stream("streams-plaintext-input")
        .flatMapValues(value -> Collections.singletonList(""));
    final Topology topology = builder.build();
    final Properties config = new Properties();
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class);
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class);
    try (final TopologyTestDriver testDriver = new TopologyTestDriver(topology, config)) {
        final TestInputTopic<String, String> topic =
            testDriver.createInputTopic("streams-plaintext-input", new StringSerializer(), new StringSerializer());
        final StreamsException se = assertThrows(StreamsException.class, () -> topic.pipeInput("a-key", "a value"));
        final String msg = se.getMessage();
        assertTrue("Error about class cast with serdes", msg.contains("ClassCastException"));
        assertTrue("Error about class cast with serdes", msg.contains("Serdes"));
    }
}
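The usual fix is to declare serdes that match the record types rather than relying on incompatible defaults. A minimal sketch (not part of the original test) that declares String serdes at the source:

    builder.stream("streams-plaintext-input", Consumed.with(Serdes.String(), Serdes.String()))
        .flatMapValues(value -> Collections.singletonList(""));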
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From class HandlingSourceTopicDeletionIntegrationTest, method shouldThrowErrorAfterSourceTopicDeleted:
@Test
public void shouldThrowErrorAfterSourceTopicDeleted() throws InterruptedException {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Integer(), Serdes.String()))
        .to(OUTPUT_TOPIC, Produced.with(Serdes.Integer(), Serdes.String()));

    final String safeTestName = safeUniqueTestName(getClass(), testName);
    final String appId = "app-" + safeTestName;
    final Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, NUM_THREADS);
    streamsConfiguration.put(StreamsConfig.METADATA_MAX_AGE_CONFIG, 2000);
    final Topology topology = builder.build();

    final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsConfiguration);
    final AtomicBoolean calledUncaughtExceptionHandler1 = new AtomicBoolean(false);
    kafkaStreams1.setUncaughtExceptionHandler(exception -> {
        calledUncaughtExceptionHandler1.set(true);
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    kafkaStreams1.start();

    final KafkaStreams kafkaStreams2 = new KafkaStreams(topology, streamsConfiguration);
    final AtomicBoolean calledUncaughtExceptionHandler2 = new AtomicBoolean(false);
    kafkaStreams2.setUncaughtExceptionHandler(exception -> {
        calledUncaughtExceptionHandler2.set(true);
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    kafkaStreams2.start();

    TestUtils.waitForCondition(
        () -> kafkaStreams1.state() == State.RUNNING && kafkaStreams2.state() == State.RUNNING,
        TIMEOUT,
        () -> "Kafka Streams clients did not reach state RUNNING");

    CLUSTER.deleteTopicAndWait(INPUT_TOPIC);

    TestUtils.waitForCondition(
        () -> kafkaStreams1.state() == State.ERROR && kafkaStreams2.state() == State.ERROR,
        TIMEOUT,
        () -> "Kafka Streams clients did not reach state ERROR");

    assertThat(calledUncaughtExceptionHandler1.get(), is(true));
    assertThat(calledUncaughtExceptionHandler2.get(), is(true));
}
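Deleting the only source topic causes the stream threads to hit a missing-source-topic error, and returning SHUTDOWN_CLIENT from the uncaught exception handler drives each client into the ERROR state, which the second waitForCondition observes. As excerpted, the two clients are never closed; a fuller test would typically release them at the end, for example:

    kafkaStreams1.close();
    kafkaStreams2.close();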
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From class StreamsGraphTest, method shouldNotOptimizeWithValueOrKeyChangingOperatorsAfterInitialKeyChange:
@Test
public void shouldNotOptimizeWithValueOrKeyChangingOperatorsAfterInitialKeyChange() {
    final Topology attemptedOptimize = getTopologyWithChangingValuesAfterChangingKey(StreamsConfig.OPTIMIZE);
    final Topology noOptimization = getTopologyWithChangingValuesAfterChangingKey(StreamsConfig.NO_OPTIMIZATION);

    assertEquals(attemptedOptimize.describe().toString(), noOptimization.describe().toString());
    assertEquals(2, getCountOfRepartitionTopicsFound(attemptedOptimize.describe().toString()));
    assertEquals(2, getCountOfRepartitionTopicsFound(noOptimization.describe().toString()));
}
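The helper getTopologyWithChangingValuesAfterChangingKey is not reproduced in this excerpt. A sketch consistent with the assertions (the topic names and the specific value-changing operators are assumptions): once values are changed after the initial selectKey, the optimizer cannot hoist a single shared repartition topic in front of both groupings, so each groupByKey keeps its own repartition topic and the "optimized" topology is identical to the unoptimized one:

    private Topology getTopologyWithChangingValuesAfterChangingKey(final String optimizeConfig) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> mappedKeyStream =
            builder.<String, String>stream("input").selectKey((k, v) -> k + v);
        // Both branches change the value after the key change, so each grouping
        // is preceded by its own repartition topic (two in total).
        mappedKeyStream.mapValues(v -> v.toUpperCase(Locale.ROOT)).groupByKey().count();
        mappedKeyStream.flatMapValues(v -> Arrays.asList(v.split("\\s"))).groupByKey().count();
        final Properties properties = new Properties();
        properties.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizeConfig);
        return builder.build(properties);
    }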