use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
the class AbstractKeyValueStoreTest, method shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey.
@Test
public void shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        try (final KeyValueIterator<Integer, String> iterator = store.reverseRange(-1, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to."
            + " This may be due to range arguments set in the wrong order, "
            + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes."
            + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
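The warning here is triggered because the built-in IntegerSerializer encodes keys as big-endian two's-complement bytes, so a negative key serializes to bytes that compare lexicographically greater than those of a positive key and the store sees from > to. A minimal sketch of that ordering using the public Serdes and Bytes utilities (the topic name is just a placeholder):

// Sketch only: shows why reverseRange(-1, 1) is treated as an inverted range.
import java.util.Arrays;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;

public class NegativeKeyOrderingSketch {
    public static void main(final String[] args) {
        final byte[] from = Serdes.Integer().serializer().serialize("any-topic", -1); // FF FF FF FF
        final byte[] to = Serdes.Integer().serializer().serialize("any-topic", 1);    // 00 00 00 01
        // Key-value stores compare serialized keys as unsigned lexicographic bytes,
        // so the serialized -1 sorts after the serialized 1.
        System.out.println(Bytes.wrap(from).compareTo(Bytes.wrap(to)) > 0); // true -> from > to
        System.out.println(Arrays.toString(from) + " vs " + Arrays.toString(to));
    }
}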
use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
the class AbstractKeyValueStoreTest, method shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo.
@Test
public void shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        try (final KeyValueIterator<Integer, String> iterator = store.range(2, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to."
            + " This may be due to range arguments set in the wrong order, "
            + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes."
            + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
the class AbstractKeyValueStoreTest, method shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo.
@Test
public void shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        try (final KeyValueIterator<Integer, String> iterator = store.reverseRange(2, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to."
            + " This may be due to range arguments set in the wrong order, "
            + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes."
            + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
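The three range tests above assert on exactly the same warning text. A hedged sketch of how that duplication could be factored into a shared constant and helper within the test class; the names INVALID_RANGE_WARNING and assertEmptyWithWarning are illustrative and not part of AbstractKeyValueStoreTest:

// Illustrative refactoring sketch; these members are not in the original test class.
private static final String INVALID_RANGE_WARNING =
    "Returning empty iterator for fetch with invalid key range: from > to."
        + " This may be due to range arguments set in the wrong order, "
        + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes."
        + " Note that the built-in numerical serdes do not follow this for negative numbers";

private void assertEmptyWithWarning(final KeyValueIterator<Integer, String> result,
                                    final LogCaptureAppender appender) {
    // Close the iterator after verifying it is empty, then check the captured log output.
    try (final KeyValueIterator<Integer, String> iterator = result) {
        assertFalse(iterator.hasNext());
    }
    assertThat(appender.getMessages(), hasItem(INVALID_RANGE_WARNING));
}

Each test body would then reduce to a single call such as assertEmptyWithWarning(store.reverseRange(2, 1), appender) inside its try-with-resources block.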
use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
the class AbstractSessionBytesStoreTest, method shouldLogAndMeasureExpiredRecords.
@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final SessionStore<String, Long> sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long());
    final InternalMockProcessorContext context =
        new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setTime(1L);
    context.setSystemTimeMs(time.milliseconds());
    sessionStore.init((StateStoreContext) context, sessionStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting a record with a timestamp large enough that records with timestamp 0 are expired.
        // Note that RocksDB only expires whole segments at a time (segment interval = 60,000 for this retention period).
        sessionStore.put(new Windowed<>("initial record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);
        // Try inserting a record with timestamp 0 -- it should be dropped.
        sessionStore.put(new Windowed<>("late record", new SessionWindow(0, 0)), 0L);
        sessionStore.put(new Windowed<>("another on-time record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal = metrics.get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    final Metric dropRate = metrics.get(new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    sessionStore.close();
}
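The expiry this test relies on happens at segment granularity: once the initial record advances observed stream time to 2 * SEGMENT_INTERVAL, the segment containing window end 0 falls behind the retention horizon, so the late put is dropped and logged. A rough sketch of that rule, under the assumption that expiry is decided per segment as the test's comment describes (illustrative only, not the segmented store's actual code):

// Sketch of the segment-level expiry rule the test exercises; illustrative only.
static boolean wouldBeDroppedAsExpired(final long windowEndMs,
                                       final long observedStreamTimeMs,
                                       final long retentionPeriodMs,
                                       final long segmentIntervalMs) {
    // A put is skipped when the segment its window end maps to lies entirely
    // before the retention horizon (observed stream time minus the retention period).
    final long segmentId = windowEndMs / segmentIntervalMs;
    final long minLiveSegmentId = Math.max(0L, observedStreamTimeMs - retentionPeriodMs) / segmentIntervalMs;
    return segmentId < minLiveSegmentId;
}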
use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
the class StreamThreadTest, method shouldLogAndRecordSkippedRecordsForInvalidTimestamps.
@Test
public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final Properties config = configProps(false);
    config.setProperty(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, LogAndSkipOnInvalidTimestamp.class.getName());
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(config), false);
    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);
    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    final MetricName skippedTotalMetric = metrics.metricName(
        "skipped-records-total", "stream-metrics", Collections.singletonMap("client-id", thread.getName()));
    final MetricName skippedRateMetric = metrics.metricName(
        "skipped-records-rate", "stream-metrics", Collections.singletonMap("client-id", thread.getName()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordQueue.class)) {
        long offset = -1;
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        thread.runOnce();
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        thread.runOnce();
        addRecord(mockConsumer, ++offset, 1L);
        addRecord(mockConsumer, ++offset, 1L);
        thread.runOnce();
        final List<String> strings = appender.getMessages();
        final String threadTaskPrefix = "stream-thread [" + Thread.currentThread().getName() + "] task [0_1] ";
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. "
            + "topic=[topic1] partition=[1] offset=[0] extractedTimestamp=[-1] "
            + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. "
            + "topic=[topic1] partition=[1] offset=[1] extractedTimestamp=[-1] "
            + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. "
            + "topic=[topic1] partition=[1] offset=[2] extractedTimestamp=[-1] "
            + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. "
            + "topic=[topic1] partition=[1] offset=[3] extractedTimestamp=[-1] "
            + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. "
            + "topic=[topic1] partition=[1] offset=[4] extractedTimestamp=[-1] "
            + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. "
            + "topic=[topic1] partition=[1] offset=[5] extractedTimestamp=[-1] "
            + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
    }
}
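The six assertions at the end differ only in the record offset. A hedged sketch of a helper that would build the expected log line for a given offset; expectedSkipMessage is an illustrative name, not part of StreamThreadTest:

// Illustrative helper; not part of the original test.
private static String expectedSkipMessage(final String threadTaskPrefix, final long offset) {
    // Reproduces the log line format asserted above for the given offset.
    return threadTaskPrefix
        + "Skipping record due to negative extracted timestamp. "
        + "topic=[topic1] partition=[1] offset=[" + offset + "] extractedTimestamp=[-1] "
        + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]";
}

Each check would then read assertTrue(strings.contains(expectedSkipMessage(threadTaskPrefix, 0))), and so on for offsets 1 through 5.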