use of org.junit.rules.TestName in project kafka by apache.
the class StateDirectoryIntegrationTest method testNotCleanUpStateDirIfNotEmpty.
@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);

    // Create topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);

    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())
    ));

    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));

        // Create topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String())
        );
        final Topology topology = builder.build();

        // State store directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();

        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())
        ));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);

        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);

        // Application state directory
        final File appDir = new File(stateDir, applicationId);

        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }

        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());

        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }

        // Validate that the state store directory is not cleaned up, since it is not empty.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't clean up in time.", e);
        }
        streams.cleanUp();

        // Root state store exists
        assertTrue((new File(stateDir)).exists());
        // Application state store exists
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
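Both tests in this class build their unique names from a testName field whose declaration is not part of this excerpt. In JUnit 4, org.junit.rules.TestName is typically wired in as a rule, roughly as sketched below (only the rule declaration is shown; the rest of the class body is elided):

import org.junit.Rule;
import org.junit.rules.TestName;

public class StateDirectoryIntegrationTest {

    // Captures the name of the currently running test method.
    @Rule
    public TestName testName = new TestName();

    // While testNotCleanUpStateDirIfNotEmpty() runs, testName.getMethodName()
    // returns "testNotCleanUpStateDirIfNotEmpty"; safeUniqueTestName() can combine
    // this with the class name to build a unique, filesystem-safe identifier.
}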
use of org.junit.rules.TestName in project kafka by apache.
the class StateDirectoryIntegrationTest method testCleanUpStateDirIfEmpty.
@Test
public void testCleanUpStateDirIfEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);

    // Create topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);

    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())
    ));

    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));

        // Create topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String())
        );
        final Topology topology = builder.build();

        // State store directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();

        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())
        ));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);

        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);

        // Application state directory
        final File appDir = new File(stateDir, applicationId);

        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }

        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());

        // Validate StateStore directory is cleaned up.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't clean up in time.", e);
        }
        streams.cleanUp();

        // Root state store exists
        assertTrue((new File(stateDir)).exists());
        // case 1: the state directory is cleaned up without any problems.
        // case 2: the state directory is not cleaned up, because it does not include any checkpoint file.
        // case 3: the state directory is not cleaned up, because it includes a checkpoint file, but it is empty.
        assertTrue(
            !appDir.exists()
                || Arrays.stream(appDir.listFiles()).filter(
                    (File f) -> f.isDirectory() && f.listFiles().length > 0 && !(new File(f, ".checkpoint")).exists()
                ).findFirst().isPresent()
                || Arrays.stream(appDir.listFiles()).filter(
                    (File f) -> f.isDirectory() && (new File(f, ".checkpoint")).length() == 0L
                ).findFirst().isPresent()
        );
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
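The final assertion above folds three directory checks into a single expression. As an illustration only (this helper is hypothetical and not part of the Kafka test), the "case 2" condition could be factored out like this:

import java.io.File;
import java.util.Arrays;

final class StateDirChecks {

    // Hypothetical helper: true if some task directory under appDir contains files
    // but no ".checkpoint" file ("case 2" in the assertion above).
    static boolean hasTaskDirWithoutCheckpoint(final File appDir) {
        final File[] children = appDir.listFiles();
        if (children == null) {
            return false; // appDir does not exist or is not a directory
        }
        return Arrays.stream(children)
            .filter(File::isDirectory)
            .anyMatch(dir -> {
                final File[] files = dir.listFiles();
                return files != null && files.length > 0 && !new File(dir, ".checkpoint").exists();
            });
    }
}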
use of org.junit.rules.TestName in project kafka by apache.
the class HighAvailabilityTaskAssignorIntegrationTest method shouldScaleOutWithWarmupTasks.
private void shouldScaleOutWithWarmupTasks(
    final Function<String, Materialized<Object, Object, KeyValueStore<Bytes, byte[]>>> materializedFunction
) throws InterruptedException {
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + System.currentTimeMillis() + "_" + testId;
    final String inputTopic = "input" + testId;
    final Set<TopicPartition> inputTopicPartitions = mkSet(
        new TopicPartition(inputTopic, 0),
        new TopicPartition(inputTopic, 1)
    );
    final String storeName = "store" + testId;
    final String storeChangelog = appId + "-store" + testId + "-changelog";
    final Set<TopicPartition> changelogTopicPartitions = mkSet(
        new TopicPartition(storeChangelog, 0),
        new TopicPartition(storeChangelog, 1)
    );
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, 2, inputTopic, storeChangelog);

    final ReentrantLock assignmentLock = new ReentrantLock();
    final AtomicInteger assignmentsCompleted = new AtomicInteger(0);
    final Map<Integer, Boolean> assignmentsStable = new ConcurrentHashMap<>();
    final AtomicBoolean assignmentStable = new AtomicBoolean(false);
    final AssignmentListener assignmentListener = stable -> {
        assignmentLock.lock();
        try {
            final int thisAssignmentIndex = assignmentsCompleted.incrementAndGet();
            assignmentsStable.put(thisAssignmentIndex, stable);
            assignmentStable.set(stable);
        } finally {
            assignmentLock.unlock();
        }
    };

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(inputTopic, materializedFunction.apply(storeName));
    final Topology topology = builder.build();

    final int numberOfRecords = 500;
    produceTestData(inputTopic, numberOfRecords);

    try (final KafkaStreams kafkaStreams0 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final Consumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties())) {
        kafkaStreams0.start();

        // sanity check: just make sure we actually wrote all the input records
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(inputTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the input topic: "
                + getEndOffsetSum(inputTopicPartitions, consumer)
        );

        // wait until all the input records are in the changelog
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(changelogTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the changelog: "
                + getEndOffsetSum(changelogTopicPartitions, consumer)
        );

        final AtomicLong instance1TotalRestored = new AtomicLong(-1);
        final AtomicLong instance1NumRestored = new AtomicLong(-1);
        final CountDownLatch restoreCompleteLatch = new CountDownLatch(1);
        kafkaStreams1.setGlobalStateRestoreListener(new StateRestoreListener() {
            @Override
            public void onRestoreStart(final TopicPartition topicPartition,
                                       final String storeName,
                                       final long startingOffset,
                                       final long endingOffset) { }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition,
                                        final String storeName,
                                        final long batchEndOffset,
                                        final long numRestored) {
                instance1NumRestored.accumulateAndGet(
                    numRestored,
                    (prev, restored) -> prev == -1 ? restored : prev + restored
                );
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition,
                                     final String storeName,
                                     final long totalRestored) {
                instance1TotalRestored.accumulateAndGet(
                    totalRestored,
                    (prev, restored) -> prev == -1 ? restored : prev + restored
                );
                restoreCompleteLatch.countDown();
            }
        });

        final int assignmentsBeforeScaleOut = assignmentsCompleted.get();
        kafkaStreams1.start();
        TestUtils.waitForCondition(
            () -> {
                assignmentLock.lock();
                try {
                    if (assignmentsCompleted.get() > assignmentsBeforeScaleOut) {
                        assertFalseNoRetry(
                            assignmentsStable.get(assignmentsBeforeScaleOut + 1),
                            "the first assignment after adding a node should be unstable while we warm up the state."
                        );
                        return true;
                    } else {
                        return false;
                    }
                } finally {
                    assignmentLock.unlock();
                }
            },
            120_000L,
            "Never saw a first assignment after scale out: " + assignmentsCompleted.get()
        );

        TestUtils.waitForCondition(
            assignmentStable::get,
            120_000L,
            "Assignment hasn't become stable: " + assignmentsCompleted.get()
                + " Note, if this does fail, check and see if the new instance just failed to catch up within"
                + " the probing rebalance interval. A full minute should be long enough to read ~500 records"
                + " in any test environment, but you never know..."
        );

        restoreCompleteLatch.await();
        // We should finalize the restoration without having restored any records (because they're already in
        // the store). Otherwise, we failed to properly re-use the state from the standby.
        assertThat(instance1TotalRestored.get(), is(0L));
        // Belt-and-suspenders check that we never even attempt to restore any records.
        assertThat(instance1NumRestored.get(), is(-1L));
    }
}
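The scale-out test polls getEndOffsetSum(...) until all records are visible in the input and changelog topics; the helper's body is not included in this excerpt. A minimal sketch, assuming it simply sums the end offsets the consumer reports for the given partitions:

import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

final class OffsetSums {

    // Sketch only: sum the end offsets of the given partitions as reported by the consumer.
    static long getEndOffsetSum(final Set<TopicPartition> partitions, final Consumer<String, String> consumer) {
        long sum = 0L;
        final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
        for (final long offset : endOffsets.values()) {
            sum += offset;
        }
        return sum;
    }
}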
use of org.junit.rules.TestName in project kafka by apache.
the class QueryableStateIntegrationTest method before.
@Before
public void before() throws Exception {
    createTopics();
    streamsConfiguration = new Properties();
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);

    stringComparator = Comparator.comparing((KeyValue<String, String> o) -> o.key).thenComparing(o -> o.value);
    stringLongComparator = Comparator.comparing((KeyValue<String, Long> o) -> o.key).thenComparingLong(o -> o.value);

    inputValues = getInputValues();
    inputValuesKeys = new HashSet<>();
    for (final String sentence : inputValues) {
        final String[] words = sentence.split("\\W+");
        Collections.addAll(inputValuesKeys, words);
    }
}
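This before() method only prepares configuration; the QueryableStateIntegrationTest cases then start KafkaStreams instances and query their stores interactively. As a reminder of the query side, a lookup against a running instance looks roughly like the sketch below; the store name "count-store" and key "hello" are placeholders, not names taken from the test:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

final class QueryExample {

    // Sketch only: read one value from a queryable key-value store on a running instance.
    static Long lookup(final KafkaStreams streams) {
        final ReadOnlyKeyValueStore<String, Long> store = streams.store(
            StoreQueryParameters.fromNameAndType("count-store", QueryableStoreTypes.<String, Long>keyValueStore())
        );
        return store.get("hello"); // null if the key is absent
    }
}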
use of org.junit.rules.TestName in project kafka by apache.
the class RegexSourceIntegrationTest method setUp.
@Before
public void setUp() throws InterruptedException {
    outputTopic = createTopic(topicSuffixGenerator.incrementAndGet());
    final Properties properties = new Properties();
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    properties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    properties.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, 0L);
    properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);

    streamsConfiguration = StreamsTestUtils.getStreamsConfig(
        IntegrationTestUtils.safeUniqueTestName(RegexSourceIntegrationTest.class, new TestName()),
        CLUSTER.bootstrapServers(),
        STRING_SERDE_CLASSNAME,
        STRING_SERDE_CLASSNAME,
        properties
    );
}
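setUp() only assembles the Streams configuration; as its name suggests, RegexSourceIntegrationTest exercises topologies whose sources subscribe to topics by pattern. A minimal sketch of such a pattern-based source, with a placeholder pattern and sink topic that are not taken from the test:

import java.util.regex.Pattern;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;

final class RegexSourceSketch {

    // Sketch only: subscribe a Streams source to every topic matching a regex.
    // The pattern "topic-\\d+" and the sink topic are placeholders.
    static Topology build() {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> stream =
            builder.stream(Pattern.compile("topic-\\d+"), Consumed.with(Serdes.String(), Serdes.String()));
        stream.to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
        return builder.build();
    }
}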