Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class StreamsConfigTest, method shouldLogWarningWhenEosAlphaIsUsed:
@SuppressWarnings("deprecation")
@Test
public void shouldLogWarningWhenEosAlphaIsUsed() {
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) {
        new StreamsConfig(props);
        assertThat(appender.getMessages(), hasItem(
            "Configuration parameter `" + StreamsConfig.EXACTLY_ONCE + "` is deprecated and will be removed in the 4.0.0 release. " +
            "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead. " +
            "Note that this requires broker version 2.5+ so you should prepare " +
            "to upgrade your brokers if necessary."));
    }
}
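The pattern above is reusable for any class that logs through SLF4J: register the appender on the class under test, trigger the behavior, and assert on the captured messages. A minimal sketch (MyComponent and its warning text are hypothetical placeholders; only the LogCaptureAppender calls mirror the real test utility shown above):

@Test
public void shouldLogExpectedWarning() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(MyComponent.class)) {
        new MyComponent().doSomethingThatWarns(); // hypothetical call that emits the warning
        assertThat(appender.getMessages(), hasItem("expected warning text"));
    }
}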
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class StreamThreadTest, method shouldLogAndRecordSkippedMetricForDeserializationException:
@Test
public void shouldLogAndRecordSkippedMetricForDeserializationException() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);

    final Properties config = configProps(false);
    config.setProperty(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndContinueExceptionHandler.class.getName());
    config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(config), false);

    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);

    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());

    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();

    // Two records whose values are not valid integers, so the Integer value serde fails on both.
    long offset = -1;
    mockConsumer.addRecord(new ConsumerRecord<>(t1p1.topic(), t1p1.partition(), ++offset, -1, TimestampType.CREATE_TIME, -1, -1, new byte[0], "I am not an integer.".getBytes(), new RecordHeaders(), Optional.empty()));
    mockConsumer.addRecord(new ConsumerRecord<>(t1p1.topic(), t1p1.partition(), ++offset, -1, TimestampType.CREATE_TIME, -1, -1, new byte[0], "I am not an integer.".getBytes(), new RecordHeaders(), Optional.empty()));

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordDeserializer.class)) {
        thread.runOnce();
        final List<String> strings = appender.getMessages();
        assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" +
            " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[0]"));
        assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" +
            " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[1]"));
    }
}
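Outside of tests, the same handler is enabled through ordinary Streams configuration; a minimal sketch (the application id and bootstrap servers are placeholder values):

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app"); // placeholder
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
// Log and skip records that fail deserialization instead of killing the stream thread.
props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
        LogAndContinueExceptionHandler.class.getName());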
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class CachingPersistentSessionStoreTest, method shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey:
@Test
public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() {
    final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
    final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class);
         final KeyValueIterator<Windowed<Bytes>, byte[]> iterator = cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L)) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to." +
            " This may be due to range arguments set in the wrong order, " +
            "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
            " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
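The warning exists because Kafka's IntegerSerializer writes big-endian two's-complement bytes, so a negative key serializes to bytes that compare lexicographically greater than any non-negative key. A quick sketch of the mismatch (Bytes.compareTo performs unsigned lexicographic comparison):

final byte[] minusOne = Serdes.Integer().serializer().serialize("", -1); // FF FF FF FF
final byte[] plusOne = Serdes.Integer().serializer().serialize("", 1);   // 00 00 00 01
// 0xFF > 0x00 unsigned, so the "from" key (-1) sorts after the "to" key (1):
// exactly the from > to case the store reports before returning an empty iterator.
assertTrue(Bytes.wrap(minusOne).compareTo(Bytes.wrap(plusOne)) > 0);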
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class AbstractRocksDBSegmentedBytesStoreTest, method shouldLogAndMeasureExpiredRecords:
@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final AbstractRocksDBSegmentedBytesStore<S> bytesStore = getBytesStore();
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig));
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    bytesStore.init((StateStoreContext) context, bytesStore);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Write a record to advance stream time, with a high enough timestamp
        // that the subsequent record in windows[0] will already be expired.
        bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0));
        final Bytes key = serializeKey(new Windowed<>("a", windows[0]));
        final byte[] value = serializeValue(5);
        bytesStore.put(key, value);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }

    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    final Metric dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    bytesStore.close();
}
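The mkMap and mkEntry calls are static imports from org.apache.kafka.common.utils.Utils; the same lookup without them is plain map construction (a sketch using the tag names from the test):

final Map<String, String> tags = new HashMap<>();
tags.put("thread-id", Thread.currentThread().getName());
tags.put("task-id", "0_0");
final Metric dropTotal = metrics.get(
        new MetricName("dropped-records-total", "stream-task-metrics", "", tags));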
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From class AbstractWindowBytesStoreTest, method shouldNotThrowInvalidRangeExceptionWithNegativeFromKey:
@Test
public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister();
         final KeyValueIterator<Windowed<Integer>, String> iterator = windowStore.fetch(-1, 1, 0L, 10L)) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to." +
            " This may be due to range arguments set in the wrong order, " +
            "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
            " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
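If negative keys need to work in range fetches, the serde itself must preserve numeric order in its byte encoding. One standard trick is to flip the sign bit so that signed order matches unsigned byte order; a hypothetical sketch, not part of Kafka:

// Hypothetical order-preserving integer serializer: XOR-ing the sign bit maps
// Integer.MIN_VALUE..MAX_VALUE monotonically onto 0x00000000..0xFFFFFFFF, so
// unsigned lexicographic byte comparison then agrees with numeric order.
public class OrderPreservingIntSerializer implements Serializer<Integer> {
    @Override
    public byte[] serialize(final String topic, final Integer data) {
        if (data == null) {
            return null;
        }
        final int bits = data ^ 0x80000000; // flip the sign bit
        return new byte[] {
            (byte) (bits >>> 24), (byte) (bits >>> 16), (byte) (bits >>> 8), (byte) bits
        };
    }
}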