Example 1 with TestName

Use of org.junit.rules.TestName in project kafka by apache.

From class StateDirectoryIntegrationTest, method testNotCleanUpStateDirIfNotEmpty:

@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create Topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()), mkEntry(ProducerConfig.ACKS_CONFIG, "all"), mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()), mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create Test Records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create Topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(input, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName).withKeySerde(Serdes.String()).withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State Store Directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId), mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir), mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());
        // Leave a dummy file behind so the application state directory is not empty.
        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }
        // Close Streams and run cleanUp(); the state directory should NOT be deleted, because it is not empty.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't cleaned up in time.", e);
        }
        streams.cleanUp();
        // Root state directory still exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory still exists: cleanUp() skipped it because it was not empty
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
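All of these examples read a testName field that the snippets never declare. A minimal, self-contained sketch of the conventional JUnit 4 declaration they presumably rely on (the class name here is illustrative):

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

public class TestNameRuleSketch {

    // JUnit applies the rule around each test method and captures its name.
    @Rule
    public final TestName testName = new TestName();

    @Test
    public void someTest() {
        // Prints "someTest" when run under a JUnit 4 runner.
        System.out.println(testName.getMethodName());
    }
}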

Example 2 with TestName

Use of org.junit.rules.TestName in project kafka by apache.

From class StateDirectoryIntegrationTest, method testCleanUpStateDirIfEmpty:

@Test
public void testCleanUpStateDirIfEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create Topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()), mkEntry(ProducerConfig.ACKS_CONFIG, "all"), mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()), mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create Test Records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create Topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(input, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName).withKeySerde(Serdes.String()).withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State Store Directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId), mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir), mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());
        // Close Streams and run cleanUp(); the empty task directories should be deleted.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't cleaned up in time.", e);
        }
        streams.cleanUp();
        // Root state directory still exists
        assertTrue((new File(stateDir)).exists());
        // After cleanUp() one of three outcomes is acceptable:
        // case 1: the application state directory was cleaned up without any problems.
        // case 2: the state directory was not cleaned up because a non-empty task directory has no checkpoint file.
        // case 3: the state directory was not cleaned up because a task directory's checkpoint file exists but is empty.
        assertTrue(!appDir.exists()
            || Arrays.stream(appDir.listFiles())
                .anyMatch((File f) -> f.isDirectory() && f.listFiles().length > 0 && !(new File(f, ".checkpoint")).exists())
            || Arrays.stream(appDir.listFiles())
                .anyMatch((File f) -> f.isDirectory() && (new File(f, ".checkpoint")).length() == 0L));
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
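The three-way disjunction closing this test is dense. A hypothetical refactoring (the helper name is invented, not from the Kafka source) that expresses the same predicate more readably:

import java.io.File;
import java.util.Arrays;

final class CleanupStates {

    // Invented helper, for illustration only: true if appDir is in one of the
    // three acceptable states after cleanUp() that the test above asserts.
    static boolean acceptableAfterCleanup(final File appDir) {
        if (!appDir.exists()) {
            return true; // case 1: fully cleaned up
        }
        final File[] taskDirs = appDir.listFiles();
        if (taskDirs == null) {
            return false;
        }
        // case 2: some non-empty task directory has no checkpoint file
        final boolean nonEmptyWithoutCheckpoint = Arrays.stream(taskDirs).anyMatch(f ->
            f.isDirectory() && f.listFiles().length > 0 && !new File(f, ".checkpoint").exists());
        // case 3: some task directory's checkpoint file exists but is empty
        final boolean emptyCheckpoint = Arrays.stream(taskDirs).anyMatch(f ->
            f.isDirectory() && new File(f, ".checkpoint").length() == 0L);
        return nonEmptyWithoutCheckpoint || emptyCheckpoint;
    }
}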

Example 3 with TestName

Use of org.junit.rules.TestName in project kafka by apache.

From class HighAvailabilityTaskAssignorIntegrationTest, method shouldScaleOutWithWarmupTasks:

private void shouldScaleOutWithWarmupTasks(final Function<String, Materialized<Object, Object, KeyValueStore<Bytes, byte[]>>> materializedFunction) throws InterruptedException {
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + System.currentTimeMillis() + "_" + testId;
    final String inputTopic = "input" + testId;
    final Set<TopicPartition> inputTopicPartitions = mkSet(new TopicPartition(inputTopic, 0), new TopicPartition(inputTopic, 1));
    final String storeName = "store" + testId;
    final String storeChangelog = appId + "-store" + testId + "-changelog";
    final Set<TopicPartition> changelogTopicPartitions = mkSet(new TopicPartition(storeChangelog, 0), new TopicPartition(storeChangelog, 1));
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, 2, inputTopic, storeChangelog);
    final ReentrantLock assignmentLock = new ReentrantLock();
    final AtomicInteger assignmentsCompleted = new AtomicInteger(0);
    final Map<Integer, Boolean> assignmentsStable = new ConcurrentHashMap<>();
    final AtomicBoolean assignmentStable = new AtomicBoolean(false);
    final AssignmentListener assignmentListener = stable -> {
        assignmentLock.lock();
        try {
            final int thisAssignmentIndex = assignmentsCompleted.incrementAndGet();
            assignmentsStable.put(thisAssignmentIndex, stable);
            assignmentStable.set(stable);
        } finally {
            assignmentLock.unlock();
        }
    };
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(inputTopic, materializedFunction.apply(storeName));
    final Topology topology = builder.build();
    final int numberOfRecords = 500;
    produceTestData(inputTopic, numberOfRecords);
    try (final KafkaStreams kafkaStreams0 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
        final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
        final Consumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties())) {
        kafkaStreams0.start();
        // sanity check: just make sure we actually wrote all the input records
        TestUtils.waitForCondition(() -> getEndOffsetSum(inputTopicPartitions, consumer) == numberOfRecords, 120_000L, () -> "Input records haven't all been written to the input topic: " + getEndOffsetSum(inputTopicPartitions, consumer));
        // wait until all the input records are in the changelog
        TestUtils.waitForCondition(() -> getEndOffsetSum(changelogTopicPartitions, consumer) == numberOfRecords, 120_000L, () -> "Input records haven't all been written to the changelog: " + getEndOffsetSum(changelogTopicPartitions, consumer));
        final AtomicLong instance1TotalRestored = new AtomicLong(-1);
        final AtomicLong instance1NumRestored = new AtomicLong(-1);
        final CountDownLatch restoreCompleteLatch = new CountDownLatch(1);
        kafkaStreams1.setGlobalStateRestoreListener(new StateRestoreListener() {

            @Override
            public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
            }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
                instance1NumRestored.accumulateAndGet(numRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
                instance1TotalRestored.accumulateAndGet(totalRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
                restoreCompleteLatch.countDown();
            }
        });
        final int assignmentsBeforeScaleOut = assignmentsCompleted.get();
        kafkaStreams1.start();
        TestUtils.waitForCondition(() -> {
            assignmentLock.lock();
            try {
                if (assignmentsCompleted.get() > assignmentsBeforeScaleOut) {
                    assertFalseNoRetry(assignmentsStable.get(assignmentsBeforeScaleOut + 1), "the first assignment after adding a node should be unstable while we warm up the state.");
                    return true;
                } else {
                    return false;
                }
            } finally {
                assignmentLock.unlock();
            }
        }, 120_000L, "Never saw a first assignment after scale out: " + assignmentsCompleted.get());
        TestUtils.waitForCondition(assignmentStable::get, 120_000L, "Assignment hasn't become stable: " + assignmentsCompleted.get() + " Note, if this does fail, check and see if the new instance just failed to catch up within" + " the probing rebalance interval. A full minute should be long enough to read ~500 records" + " in any test environment, but you never know...");
        restoreCompleteLatch.await();
        // We should finalize the restoration without having restored any records (because they're
        // already in the store). Otherwise, we failed to properly re-use the state from the standby.
        assertThat(instance1TotalRestored.get(), is(0L));
        // Belt-and-suspenders check that we never even attempt to restore any records.
        assertThat(instance1NumRestored.get(), is(-1L));
    }
}
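The restore listener above uses -1 as a sentinel so the test can distinguish "callback never fired" from "restored zero records". A standalone sketch of that accumulateAndGet pattern:

import java.util.concurrent.atomic.AtomicLong;

public class SentinelAccumulatorDemo {

    public static void main(final String[] args) {
        // -1 means "no restore callback has fired yet"; the first value replaces
        // the sentinel, and later values are added to the running total.
        final AtomicLong numRestored = new AtomicLong(-1);
        for (final long batch : new long[] {100, 250, 150}) {
            numRestored.accumulateAndGet(batch, (prev, restored) -> prev == -1 ? restored : prev + restored);
        }
        System.out.println(numRestored.get()); // 500
        // A counter that never received a batch still reads -1, which is exactly
        // what assertThat(instance1NumRestored.get(), is(-1L)) verifies above.
    }
}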

Example 4 with TestName

Use of org.junit.rules.TestName in project kafka by apache.

From class QueryableStateIntegrationTest, method before:

@Before
public void before() throws Exception {
    createTopics();
    streamsConfiguration = new Properties();
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
    stringComparator = Comparator.comparing((KeyValue<String, String> o) -> o.key).thenComparing(o -> o.value);
    stringLongComparator = Comparator.comparing((KeyValue<String, Long> o) -> o.key).thenComparingLong(o -> o.value);
    inputValues = getInputValues();
    inputValuesKeys = new HashSet<>();
    for (final String sentence : inputValues) {
        final String[] words = sentence.split("\\W+");
        Collections.addAll(inputValuesKeys, words);
    }
}
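The before() method chains comparators to order KeyValue pairs by key and then by value. The same pattern with plain Map.Entry pairs standing in for Kafka's KeyValue (purely illustrative):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

public class ComparatorChainDemo {

    public static void main(final String[] args) {
        final List<Map.Entry<String, String>> entries = new ArrayList<>(List.of(
            Map.entry("b", "1"), Map.entry("a", "2"), Map.entry("a", "1")));
        // Primary sort on the key, ties broken by the value, mirroring
        // comparing(o -> o.key).thenComparing(o -> o.value) in before().
        entries.sort(Comparator
            .comparing((Map.Entry<String, String> e) -> e.getKey())
            .thenComparing(Map.Entry::getValue));
        System.out.println(entries); // [a=1, a=2, b=1]
    }
}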

Example 5 with TestName

Use of org.junit.rules.TestName in project kafka by apache.

From class RegexSourceIntegrationTest, method setUp:

@Before
public void setUp() throws InterruptedException {
    outputTopic = createTopic(topicSuffixGenerator.incrementAndGet());
    final Properties properties = new Properties();
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    properties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    properties.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, 0L);
    properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
    streamsConfiguration = StreamsTestUtils.getStreamsConfig(IntegrationTestUtils.safeUniqueTestName(RegexSourceIntegrationTest.class, new TestName()), CLUSTER.bootstrapServers(), STRING_SERDE_CLASSNAME, STRING_SERDE_CLASSNAME, properties);
}
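Unlike the earlier examples, this one passes a freshly constructed new TestName() instead of a @Rule field. A TestName only captures the method name when JUnit applies it as a rule, so the bare instance here has no method name and the generated config name is unique per class rather than per test method. A minimal sketch of the difference (assuming standard JUnit 4 behavior):

import org.junit.rules.TestName;

public class BareTestNameSketch {

    public static void main(final String[] args) {
        // A TestName that was never applied as a @Rule has not observed any
        // test method, so no name has been captured yet.
        final TestName bare = new TestName();
        System.out.println(bare.getMethodName()); // null
    }
}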

Aggregations

Properties (java.util.Properties): 6 uses
TestName (org.junit.rules.TestName): 6 uses
IOException (java.io.IOException): 5 uses
Utils.mkEntry (org.apache.kafka.common.utils.Utils.mkEntry): 5 uses
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 5 uses
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 5 uses
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 5 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 5 uses
EmbeddedKafkaCluster (org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster): 5 uses
IntegrationTestUtils (org.apache.kafka.streams.integration.utils.IntegrationTestUtils): 5 uses
IntegrationTestUtils.safeUniqueTestName (org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName): 5 uses
IntegrationTest (org.apache.kafka.test.IntegrationTest): 5 uses
TestUtils (org.apache.kafka.test.TestUtils): 5 uses
AfterClass (org.junit.AfterClass): 5 uses
BeforeClass (org.junit.BeforeClass): 5 uses
Rule (org.junit.Rule): 5 uses
Test (org.junit.Test): 5 uses
Category (org.junit.experimental.categories.Category): 5 uses
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 4 uses
ProducerConfig (org.apache.kafka.clients.producer.ProducerConfig): 4 uses