Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class StreamThreadTest, method shouldLogAndRecordSkippedMetricForDeserializationException.
@Test
public void shouldLogAndRecordSkippedMetricForDeserializationException() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);

    final Properties config = configProps(false);
    config.setProperty(
        StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
        LogAndContinueExceptionHandler.class.getName());
    config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(config), false);

    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);

    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());

    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();

    long offset = -1;
    mockConsumer.addRecord(new ConsumerRecord<>(
        t1p1.topic(), t1p1.partition(), ++offset, -1, TimestampType.CREATE_TIME, -1, -1,
        new byte[0], "I am not an integer.".getBytes(), new RecordHeaders(), Optional.empty()));
    mockConsumer.addRecord(new ConsumerRecord<>(
        t1p1.topic(), t1p1.partition(), ++offset, -1, TimestampType.CREATE_TIME, -1, -1,
        new byte[0], "I am not an integer.".getBytes(), new RecordHeaders(), Optional.empty()));

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordDeserializer.class)) {
        thread.runOnce();

        final List<String> strings = appender.getMessages();
        assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]"
            + " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[0]"));
        assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]"
            + " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[1]"));
    }
}
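The pattern shared by all of the examples on this page is small: register the appender against the logger of the class under test, run the code, then assert on the captured messages. The following sketch distills it; MyProcessor, runCodeThatLogsThroughMyProcessor() and the expected message fragment are hypothetical placeholders, not part of the Kafka sources.

// Minimal capture-and-assert sketch using only the calls shown in the tests above.
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(MyProcessor.class)) {
    // Exercise the code under test while the appender is attached to MyProcessor's logger.
    runCodeThatLogsThroughMyProcessor();

    // getMessages() returns the logged message strings captured while the appender was registered.
    assertThat(appender.getMessages(), hasItem(containsString("expected log fragment")));
}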
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler.
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(
        logContext,
        taskId,
        getExceptionalStreamsProducerOnSend(new Exception()),
        new AlwaysContinueProductionExceptionHandler(),
        streamsMetrics);

    try (final LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(RecordCollectorImpl.class)) {
        collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
        collector.flush();

        final List<String> messages = logCaptureAppender.getMessages();
        final StringBuilder errorMessage = new StringBuilder("Messages received:");
        for (final String error : messages) {
            errorMessage.append("\n - ").append(error);
        }
        assertTrue(
            errorMessage.toString(),
            messages.get(messages.size() - 1).endsWith(
                "Exception handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded."));
    }

    final Metric metric = streamsMetrics.metrics().get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "The total number of dropped records",
        mkMap(mkEntry("thread-id", Thread.currentThread().getName()), mkEntry("task-id", taskId.toString()))));
    assertEquals(1.0, metric.metricValue());

    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
    collector.closeClean();
}
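AlwaysContinueProductionExceptionHandler is a test helper; an application that wants the same continue-on-send-error behavior would typically register its own ProductionExceptionHandler via the default.production.exception.handler config. The following is a minimal sketch, assuming the public ProductionExceptionHandler interface; the class name ContinueOnSendErrorHandler and the example value in exampleConfig() are illustrative placeholders.

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Sketch of a handler that always continues, analogous to the test-only
// AlwaysContinueProductionExceptionHandler used in the test above.
public class ContinueOnSendErrorHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // Skip the record that could not be sent instead of failing the stream thread.
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed for this sketch
    }

    // How an application could register the handler in its Streams configuration.
    public static Properties exampleConfig() {
        final Properties props = new Properties();
        props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG,
                  ContinueOnSendErrorHandler.class.getName());
        return props;
    }
}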
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class StateDirectoryTest, method shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself.
@Test
public void shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself() {
    final File dir = directory.getOrCreateDirectoryForTask(new TaskId(2, 0));
    assertTrue(new File(dir, "store").mkdir());

    assertEquals(1, directory.listAllTaskDirectories().size());
    assertEquals(1, directory.listNonEmptyTaskDirectories().size());

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        time.sleep(5000);
        directory.cleanRemovedTasks(0);

        assertFalse(dir.exists());
        assertEquals(0, directory.listAllTaskDirectories().size());
        assertEquals(0, directory.listNonEmptyTaskDirectories().size());
        assertThat(appender.getMessages(), hasItem(containsString("Deleting obsolete state directory")));
    }
}
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class StateDirectoryTest, method shouldLogStateDirCleanerMessage.
@Test
public void shouldLogStateDirCleanerMessage() {
    final TaskId taskId = new TaskId(0, 0);
    final File taskDirectory = directory.getOrCreateDirectoryForTask(taskId);
    final File testFile = new File(taskDirectory, "testFile");
    assertThat(testFile.mkdir(), is(true));
    assertThat(directory.directoryForTaskIsEmpty(taskId), is(false));

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        final long cleanupDelayMs = 0;
        time.sleep(5000);
        directory.cleanRemovedTasks(cleanupDelayMs);
        assertThat(appender.getMessages(), hasItem(endsWith("ms has elapsed (cleanup delay is " + cleanupDelayMs + "ms).")));
    }
}
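The test drives cleanRemovedTasks() directly with an explicit delay; in a running application the comparable knob is the state.cleanup.delay.ms setting. A configuration-only sketch, with 60 seconds as a purely illustrative value:

final Properties props = new Properties();
// state.cleanup.delay.ms controls how long an unused task directory must sit
// before the background cleaner deletes it; 60 seconds here is only an example.
props.put(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG, 60_000L);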
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class StateDirectoryTest, method shouldNotCreateBaseDirectory.
@Test
public void shouldNotCreateBaseDirectory() throws IOException {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) {
        initializeStateDirectory(false, false);

        assertThat(stateDir.exists(), is(false));
        assertThat(appDir.exists(), is(false));
        assertThat(appender.getMessages(),
            not(hasItem(containsString("Error changing permissions for the state or base directory"))));
    }
}