Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class KafkaStreamsTest, method shouldCleanupResourcesOnCloseWithoutPreviousStart:
@Test
public void shouldCleanupResourcesOnCloseWithoutPreviousStart() throws Exception {
    final StreamsBuilder builder = getBuilderWithSource();
    builder.globalTable("anyTopic");
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class);
         final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
        streams.close();
        waitForCondition(() -> streams.state() == KafkaStreams.State.NOT_RUNNING, "Streams never stopped.");
        assertThat(appender.getMessages(), not(hasItem(containsString("ERROR"))));
    }
    assertTrue(supplier.consumer.closed());
    assertTrue(supplier.restoreConsumer.closed());
    for (final MockProducer<byte[], byte[]> p : supplier.producers) {
        assertTrue(p.closed());
    }
}
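These tests all follow the same pattern: register a LogCaptureAppender scoped to one class's logger, exercise the code under test, assert on the captured messages, and let try-with-resources unregister the appender. A minimal sketch of that pattern, using only the LogCaptureAppender calls visible in these tests (SomeComponent and its doWork method are hypothetical stand-ins for the code under test):

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasItem;

import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.junit.Test;

public class LogCaptureExampleTest {

    @Test
    public void shouldCaptureLogMessages() {
        // Register an appender on SomeComponent's logger; closing it
        // (via try-with-resources) detaches the appender again.
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(SomeComponent.class)) {
            new SomeComponent().doWork();  // hypothetical code under test that logs "work done"
            assertThat(appender.getMessages(), hasItem(containsString("work done")));
        }
    }
}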
From class KafkaStreamsTest, method testStateGlobalThreadClose:
@Test
public void testStateGlobalThreadClose() throws Exception {
    // make sure we have the global state thread running too
    final StreamsBuilder builder = getBuilderWithSource();
    builder.globalTable("anyTopic");
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class);
         final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
        streams.start();
        waitForCondition(() -> streams.state() == KafkaStreams.State.RUNNING, "Streams never started.");
        final GlobalStreamThread globalStreamThread = streams.globalStreamThread;
        globalStreamThread.shutdown();
        waitForCondition(() -> globalStreamThread.state() == GlobalStreamThread.State.DEAD, "Thread never stopped.");
        globalStreamThread.join();
        // shutting down the global thread externally puts KafkaStreams into an error state
        waitForCondition(() -> streams.state() == KafkaStreams.State.PENDING_ERROR, "Streams never reached PENDING_ERROR.");
        streams.close();
        waitForCondition(() -> streams.state() == KafkaStreams.State.ERROR, "Streams never reached ERROR.");
        assertThat(appender.getMessages(), hasItem(containsString("ERROR")));
    }
}
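Both KafkaStreams tests poll for asynchronous state transitions with waitForCondition instead of asserting immediately, because the stream and global threads shut down on their own schedule. A rough sketch of what such a polling helper does (the timeout and poll interval are assumed values for illustration, not the actual TestUtils internals):

// Illustrative helper in the spirit of waitForCondition: retry a boolean
// condition until it holds or a timeout expires, then fail with the message.
static void waitForCondition(final java.util.function.BooleanSupplier condition,
                             final String failureMessage) throws InterruptedException {
    final long timeoutMs = 15_000L;          // assumed default timeout
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
        if (System.currentTimeMillis() > deadline) {
            throw new AssertionError(failureMessage);
        }
        Thread.sleep(100L);                  // assumed poll interval
    }
}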
From class StreamsConfigTest, method shouldLogWarningWhenEosBetaIsUsed:
@SuppressWarnings("deprecation")
@Test
public void shouldLogWarningWhenEosBetaIsUsed() {
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA);
    LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) {
        new StreamsConfig(props);
        assertThat(appender.getMessages(), hasItem(
            "Configuration parameter `" + StreamsConfig.EXACTLY_ONCE_BETA + "` is deprecated and will be removed in the 4.0.0 release. " +
            "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead."));
    }
}
From class StreamsConfigTest, method shouldLogWarningWhenEosAlphaIsUsed:
@SuppressWarnings("deprecation")
@Test
public void shouldLogWarningWhenEosAlphaIsUsed() {
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) {
        new StreamsConfig(props);
        assertThat(appender.getMessages(), hasItem(
            "Configuration parameter `" + StreamsConfig.EXACTLY_ONCE + "` is deprecated and will be removed in the 4.0.0 release. " +
            "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead. " +
            "Note that this requires broker version 2.5+ so you should prepare " +
            "to upgrade your brokers if necessary."));
    }
}
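The two EOS tests are identical except for the configured guarantee and the expected warning text, so they could delegate to a shared helper along these lines (a refactoring sketch, not code from the Kafka repository; assertDeprecationWarningLogged is a hypothetical name):

@SuppressWarnings("deprecation")
private void assertDeprecationWarningLogged(final String guarantee, final String expectedWarning) {
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, guarantee);
    LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) {
        // constructing the config is what triggers the deprecation warning
        new StreamsConfig(props);
        assertThat(appender.getMessages(), hasItem(expectedWarning));
    }
}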
From class StreamThreadTest, method shouldLogAndRecordSkippedRecordsForInvalidTimestamps:
@Test
public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final Properties config = configProps(false);
    config.setProperty(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, LogAndSkipOnInvalidTimestamp.class.getName());
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(config), false);
    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);
    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    // metric names under which skipped records are recorded
    final MetricName skippedTotalMetric = metrics.metricName("skipped-records-total", "stream-metrics",
        Collections.singletonMap("client-id", thread.getName()));
    final MetricName skippedRateMetric = metrics.metricName("skipped-records-rate", "stream-metrics",
        Collections.singletonMap("client-id", thread.getName()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordQueue.class)) {
        long offset = -1;
        // six records with the default (negative, hence invalid) timestamp -- all should be skipped
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        thread.runOnce();
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        thread.runOnce();
        // two records with a valid timestamp (1L) -- these should be processed, not skipped
        addRecord(mockConsumer, ++offset, 1L);
        addRecord(mockConsumer, ++offset, 1L);
        thread.runOnce();
        final List<String> strings = appender.getMessages();
        final String threadTaskPrefix = "stream-thread [" + Thread.currentThread().getName() + "] task [0_1] ";
        // exactly the six invalid-timestamp records (offsets 0 through 5) are logged as skipped
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[0] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[1] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[2] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[3] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[4] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[5] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
    }
}
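LogAndSkipOnInvalidTimestamp is the built-in extractor that produces the "Skipping record due to negative extracted timestamp" warnings asserted above: it logs the problem and returns the invalid (negative) timestamp, which signals Kafka Streams to drop the record. A stripped-down extractor with the same skip semantics might look like this (a sketch, not the actual implementation; LogAndSkipExtractor is a hypothetical name):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogAndSkipExtractor implements TimestampExtractor {

    private static final Logger log = LoggerFactory.getLogger(LogAndSkipExtractor.class);

    @Override
    public long extract(final ConsumerRecord<Object, Object> record, final long partitionTime) {
        final long timestamp = record.timestamp();
        if (timestamp < 0) {
            log.warn("Skipping record due to negative extracted timestamp. topic=[{}] partition=[{}] offset=[{}]",
                record.topic(), record.partition(), record.offset());
        }
        // A negative return value tells Kafka Streams to skip this record.
        return timestamp;
    }
}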