Search in sources :

Example 46 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project (apache/kafka).

From the class GlobalStateManagerImplTest, method shouldLogWarningMessageWhenIOExceptionInCheckPoint.

@Test
public void shouldLogWarningMessageWhenIOExceptionInCheckPoint() throws IOException {
    stateManager.initialize();
    stateManager.updateChangelogOffsets(Collections.singletonMap(t1, 25L));
    // Checkpoint data is first written to CHECKPOINT_FILE_NAME.tmp and then swapped into
    // place; making the .tmp file read-only is what forces the IOException under test.
    final File tmpCheckpoint = new File(stateDirectory.globalStateDir(), StateManagerUtil.CHECKPOINT_FILE_NAME + ".tmp");
    tmpCheckpoint.createNewFile();
    tmpCheckpoint.setWritable(false);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(GlobalStateManagerImpl.class)) {
        stateManager.checkpoint();
        // The failed write must be surfaced as a warning rather than thrown.
        assertThat(
            appender.getMessages(),
            hasItem(containsString("Failed to write offset checkpoint file to " + checkpointFile.getPath() + " for global stores"))
        );
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) File(java.io.File) Test(org.junit.Test)

Example 47 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project (apache/kafka).

From the class PartitionGroupTest, method shouldWaitForPollWhenLagIsNonzero.

@Test
public void shouldWaitForPollWhenLagIsNonzero() {
    final HashMap<TopicPartition, OptionalLong> lags = new HashMap<>();
    final PartitionGroup group = new PartitionGroup(
        logContext,
        mkMap(mkEntry(partition1, queue1), mkEntry(partition2, queue2)),
        tp -> lags.getOrDefault(tp, OptionalLong.empty()),
        getValueSensor(metrics, lastLatenessValue),
        enforcedProcessingSensor,
        0L
    );
    // Buffer records for partition1 only, while reporting a non-zero lag for partition2:
    // the group is therefore not fully buffered and must wait for another poll.
    group.addRawRecords(partition1, Arrays.asList(
        new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
        new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue)
    ));
    lags.put(partition2, OptionalLong.of(1L));
    assertThat(group.allPartitionsBufferedLocally(), is(false));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) {
        LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class);
        assertThat(group.readyToProcess(0L), is(false));
        // The decision to wait should be explained at TRACE level.
        final String expectedTrace = "[test] Lag for topic-2 is currently 1, but no data is buffered locally. Waiting to buffer some records.";
        assertThat(appender.getEvents(), hasItem(Matchers.allOf(
            Matchers.hasProperty("level", equalTo("TRACE")),
            Matchers.hasProperty("message", equalTo(expectedTrace))
        )));
    }
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OptionalLong(java.util.OptionalLong) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.Test)

Example 48 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project (apache/kafka).

From the class KafkaStreamsTest, method shouldCleanupResourcesOnCloseWithoutPreviousStart.

@Test
public void shouldCleanupResourcesOnCloseWithoutPreviousStart() throws Exception {
    final StreamsBuilder builder = getBuilderWithSource();
    builder.globalTable("anyTopic");
    // Close an instance that was never started; it must still reach NOT_RUNNING
    // without logging any errors.
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class);
        final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
        streams.close();
        waitForCondition(() -> streams.state() == KafkaStreams.State.NOT_RUNNING, "Streams never stopped.");
        assertThat(appender.getMessages(), not(hasItem(containsString("ERROR"))));
    }
    // Every client handed out by the supplier must have been closed on shutdown.
    assertTrue(supplier.consumer.closed());
    assertTrue(supplier.restoreConsumer.closed());
    for (final MockProducer<byte[], byte[]> producer : supplier.producers) {
        assertTrue(producer.closed());
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)

Example 49 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project (apache/kafka).

From the class KafkaStreamsTest, method testStateGlobalThreadClose.

@Test
public void testStateGlobalThreadClose() throws Exception {
    // make sure we have the global state thread running too
    final StreamsBuilder builder = getBuilderWithSource();
    builder.globalTable("anyTopic");
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class);
        final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
        streams.start();
        waitForCondition(() -> streams.state() == KafkaStreams.State.RUNNING, "Streams never started.");
        // Kill only the global thread, bypassing KafkaStreams' own shutdown path.
        final GlobalStreamThread globalStreamThread = streams.globalStreamThread;
        globalStreamThread.shutdown();
        waitForCondition(() -> globalStreamThread.state() == GlobalStreamThread.State.DEAD, "Thread never stopped.");
        globalStreamThread.join();
        // shutting down the global thread from "external" will yield an error in KafkaStreams
        waitForCondition(() -> streams.state() == KafkaStreams.State.PENDING_ERROR, "Thread never stopped.");
        // Closing while in PENDING_ERROR must complete the transition to ERROR,
        // and the failure must have been logged at ERROR level.
        streams.close();
        waitForCondition(() -> streams.state() == KafkaStreams.State.ERROR, "Thread never stopped.");
        assertThat(appender.getMessages(), hasItem(containsString("ERROR")));
    }
}
Also used : GlobalStreamThread(org.apache.kafka.streams.processor.internals.GlobalStreamThread) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)

Example 50 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project (apache/kafka).

From the class StreamsConfigTest, method shouldLogWarningWhenEosBetaIsUsed.

@SuppressWarnings("deprecation")
@Test
public void shouldLogWarningWhenEosBetaIsUsed() {
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA);
    LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) {
        // Constructing the config with the deprecated guarantee should emit the warning.
        new StreamsConfig(props);
        final String expectedWarning = "Configuration parameter `" + StreamsConfig.EXACTLY_ONCE_BETA
            + "` is deprecated and will be removed in the 4.0.0 release. "
            + "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead.";
        assertThat(appender.getMessages(), hasItem(expectedWarning));
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) StreamsTestUtils.getStreamsConfig(org.apache.kafka.test.StreamsTestUtils.getStreamsConfig) Test(org.junit.Test)

Aggregations

LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender)66 Test (org.junit.Test)65 Windowed (org.apache.kafka.streams.kstream.Windowed)16 Bytes (org.apache.kafka.common.utils.Bytes)14 Properties (java.util.Properties)13 StreamsBuilder (org.apache.kafka.streams.StreamsBuilder)13 MetricName (org.apache.kafka.common.MetricName)11 StringSerializer (org.apache.kafka.common.serialization.StringSerializer)10 StreamsConfig (org.apache.kafka.streams.StreamsConfig)10 TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver)10 File (java.io.File)8 Serdes (org.apache.kafka.common.serialization.Serdes)8 MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat)8 TopicPartition (org.apache.kafka.common.TopicPartition)7 StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils)7 CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem)7 Duration (java.time.Duration)6 StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer)6 KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp)6 Consumed (org.apache.kafka.streams.kstream.Consumed)6