
Example 16 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From the class StateDirectoryTest, method shouldLogTempDirMessage.

@Test
public void shouldLogTempDirMessage() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        // Constructing a StateDirectory with the default state.dir (an OS temp directory) triggers the warning.
        new StateDirectory(
            new StreamsConfig(mkMap(
                mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ""),
                mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, ""))),
            new MockTime(), true, false);
        assertThat(appender.getMessages(), hasItem(
            "Using an OS temp directory in the state.dir property can cause failures with writing the"
                + " checkpoint file due to the fact that this directory can be cleared by the OS."
                + " Resolved state.dir: [" + System.getProperty("java.io.tmpdir") + "/kafka-streams]"));
    }
}
Also used: LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
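
As an aside, setting state.dir explicitly avoids the temp-directory default that the warning describes. A minimal sketch, not part of the Kafka test; the path and bootstrap address below are illustrative:

// A minimal sketch, not part of the Kafka test: an explicit, non-temp state.dir
// avoids the warning asserted above. Path and bootstrap address are illustrative.
final StreamsConfig config = new StreamsConfig(mkMap(
    mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"),
    mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "my-app"),
    mkEntry(StreamsConfig.STATE_DIR_CONFIG, "/var/lib/kafka-streams")));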

Example 17 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From the class StateDirectoryTest, method shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteTheParentDirectories.

@Test
public void shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteTheParentDirectories() throws IOException {
    initializeStateDirectory(true, true);
    final File taskDir = directory.getOrCreateDirectoryForTask(new TaskId(2, 0, "topology1"));
    final File namedTopologyDir = new File(appDir, "__topology1__");
    assertThat(namedTopologyDir.exists(), is(true));
    assertThat(taskDir.exists(), is(true));
    assertTrue(new File(taskDir, "store").mkdir());
    assertThat(directory.listAllTaskDirectories().size(), is(1));
    assertThat(directory.listNonEmptyTaskDirectories().size(), is(1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        // Advance the mock clock, then trigger cleanup of task directories that are now considered obsolete.
        time.sleep(5000);
        directory.cleanRemovedTasks(0);
        assertThat(taskDir.exists(), is(false));
        assertThat(namedTopologyDir.exists(), is(false));
        assertThat(directory.listAllTaskDirectories().size(), is(0));
        assertThat(directory.listNonEmptyTaskDirectories().size(), is(0));
        assertThat(appender.getMessages(), hasItem(containsString("Deleting obsolete state directory")));
    }
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), File (java.io.File), Test (org.junit.Test)
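
A companion check one might add after the deletion assertions (a sketch, not part of the Kafka test) is that the cleanup pass produced no ERROR events; it reuses the Event accessors shown in Example 18:

// Sketch only: assert the cleanup pass logged no ERROR-level events.
assertThat(
    appender.getEvents().stream().noneMatch(event -> "ERROR".equals(event.getLevel())),
    is(true));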

Example 18 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From the class ProcessorStateManagerTest, method shouldLogAWarningIfCheckpointThrowsAnIOException.

@SuppressWarnings("OptionalGetWithoutIsPresent")
@Test
public void shouldLogAWarningIfCheckpointThrowsAnIOException() {
    final ProcessorStateManager stateMgr = getStateManager(Task.TaskType.ACTIVE);
    stateMgr.registerStore(persistentStore, persistentStore.stateRestoreCallback, null);
    // Wipe the state directory out from under the state manager so the checkpoint write below fails.
    stateDirectory.clean();
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(ProcessorStateManager.class)) {
        stateMgr.updateChangelogOffsets(singletonMap(persistentStorePartition, 10L));
        stateMgr.checkpoint();
        boolean foundExpectedLogMessage = false;
        // Scan the captured events for the WARN entry describing the failed checkpoint write.
        for (final LogCaptureAppender.Event event : appender.getEvents()) {
            if ("WARN".equals(event.getLevel())
                    && event.getMessage().startsWith("process-state-manager-test Failed to write offset checkpoint file to [")
                    && event.getMessage().endsWith(".checkpoint]."
                        + " This may occur if OS cleaned the state.dir in case when it located in ${java.io.tmpdir} directory."
                        + " This may also occur due to running multiple instances on the same machine using the same state dir."
                        + " Changing the location of state.dir may resolve the problem.")
                    && event.getThrowableInfo().get().startsWith("java.io.FileNotFoundException: ")) {
                foundExpectedLogMessage = true;
                break;
            }
        }
        assertTrue(foundExpectedLogMessage);
    }
}
Also used: LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), Test (org.junit.Test)
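
The manual scan above can also be phrased with the Stream API; a minimal sketch, assuming only the Event accessors the test itself uses (getLevel and getMessage):

// Equivalent scan using streams; matches on the level and the message prefix only.
final boolean found = appender.getEvents().stream()
    .filter(event -> "WARN".equals(event.getLevel()))
    .anyMatch(event -> event.getMessage().startsWith(
        "process-state-manager-test Failed to write offset checkpoint file to ["));
assertTrue(found);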

Example 19 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From the class PartitionGroupTest, method shouldNeverWaitIfIdlingIsDisabled.

@Test
public void shouldNeverWaitIfIdlingIsDisabled() {
    final PartitionGroup group = new PartitionGroup(
        logContext,
        mkMap(mkEntry(partition1, queue1), mkEntry(partition2, queue2)),
        tp -> OptionalLong.of(0L),
        getValueSensor(metrics, lastLatenessValue),
        enforcedProcessingSensor,
        StreamsConfig.MAX_TASK_IDLE_MS_DISABLED);
    final List<ConsumerRecord<byte[], byte[]>> list1 = Arrays.asList(
        new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
        new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
    group.addRawRecords(partition1, list1);
    assertThat(group.allPartitionsBufferedLocally(), is(false));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) {
        // TRACE output is only captured once the class logger is raised to TRACE.
        LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class);
        assertThat(group.readyToProcess(0L), is(true));
        assertThat(appender.getEvents(), hasItem(Matchers.allOf(
            Matchers.hasProperty("level", equalTo("TRACE")),
            Matchers.hasProperty("message", equalTo(
                "[test] Ready for processing because max.task.idle.ms is disabled.\n"
                    + "\tThere may be out-of-order processing for this task as a result.\n"
                    + "\tBuffered partitions: [topic-1]\n"
                    + "\tNon-buffered partitions: [topic-2]")))));
    }
}
Also used: LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
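
Because the expected TRACE message embeds partition lists and newlines, an exact-match assertion is brittle. Matching on the level plus a stable fragment is a looser alternative; a sketch using the same Hamcrest matchers as above, with containsString in place of equalTo:

// Sketch only: match the level and a stable message fragment rather than the full text.
assertThat(appender.getEvents(), hasItem(Matchers.allOf(
    Matchers.hasProperty("level", equalTo("TRACE")),
    Matchers.hasProperty("message", containsString("max.task.idle.ms is disabled")))));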

Example 20 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From the class PartitionGroupTest, method shouldBeReadyIfAllPartitionsAreBuffered.

@Test
public void shouldBeReadyIfAllPartitionsAreBuffered() {
    final PartitionGroup group = new PartitionGroup(
        logContext,
        mkMap(mkEntry(partition1, queue1), mkEntry(partition2, queue2)),
        tp -> OptionalLong.of(0L),
        getValueSensor(metrics, lastLatenessValue),
        enforcedProcessingSensor,
        0L);
    final List<ConsumerRecord<byte[], byte[]>> list1 = Arrays.asList(
        new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
        new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
    group.addRawRecords(partition1, list1);
    final List<ConsumerRecord<byte[], byte[]>> list2 = Arrays.asList(
        new ConsumerRecord<>("topic", 2, 1L, recordKey, recordValue),
        new ConsumerRecord<>("topic", 2, 5L, recordKey, recordValue));
    group.addRawRecords(partition2, list2);
    assertThat(group.allPartitionsBufferedLocally(), is(true));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) {
        LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class);
        assertThat(group.readyToProcess(0L), is(true));
        assertThat(appender.getEvents(), hasItem(Matchers.allOf(
            Matchers.hasProperty("level", equalTo("TRACE")),
            Matchers.hasProperty("message", equalTo(
                "[test] All partitions were buffered locally, so this task is ready for processing.")))));
    }
}
Also used: LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
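
Examples 16 through 20 share one shape: register the appender for the class under test in a try-with-resources block, trigger the code path, then assert on the captured output. A hypothetical helper distilling that shape (the assertLogged name and signature are illustrative, not part of the Kafka codebase):

// Hypothetical convenience wrapper; not part of the Kafka test utilities.
static void assertLogged(final Class<?> clazz, final Runnable action, final String expectedFragment) {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(clazz)) {
        action.run();
        assertThat(appender.getMessages(), hasItem(containsString(expectedFragment)));
    }
}

With such a helper, Example 17's assertion would collapse to assertLogged(StateDirectory.class, () -> directory.cleanRemovedTasks(0), "Deleting obsolete state directory").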

Aggregations

LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender): 66 uses
Test (org.junit.Test): 65 uses
Windowed (org.apache.kafka.streams.kstream.Windowed): 16 uses
Bytes (org.apache.kafka.common.utils.Bytes): 14 uses
Properties (java.util.Properties): 13 uses
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 13 uses
MetricName (org.apache.kafka.common.MetricName): 11 uses
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 10 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10 uses
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10 uses
File (java.io.File): 8 uses
Serdes (org.apache.kafka.common.serialization.Serdes): 8 uses
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 8 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 7 uses
StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils): 7 uses
CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem): 7 uses
Duration (java.time.Duration): 6 uses
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 6 uses
KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp): 6 uses
Consumed (org.apache.kafka.streams.kstream.Consumed): 6 uses