
Example 56 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project.

From the class AbstractKeyValueStoreTest, method shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey.

@Test
public void shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        try (final KeyValueIterator<Integer, String> iterator = store.reverseRange(-1, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to." +
                " This may be due to range arguments set in the wrong order, " +
                "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
                " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)
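The warning asserted above fires because the built-in Integer serde does not preserve sign order once the serialized bytes are compared lexicographically: -1 serializes to 0xFFFFFFFF, which sorts after the bytes of 1. A minimal standalone sketch of that comparison (the class name NegativeKeyOrderingSketch is hypothetical and not part of the test above):

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;

public class NegativeKeyOrderingSketch {
    public static void main(final String[] args) {
        try (final Serde<Integer> serde = Serdes.Integer()) {
            // IntegerSerializer writes big-endian two's-complement bytes.
            final Bytes from = Bytes.wrap(serde.serializer().serialize("any-topic", -1)); // 0xFF 0xFF 0xFF 0xFF
            final Bytes to = Bytes.wrap(serde.serializer().serialize("any-topic", 1));    // 0x00 0x00 0x00 0x01
            // Bytes compares unsigned bytes lexicographically, so "from" sorts after "to";
            // the store therefore sees from > to and returns an empty iterator instead of throwing.
            System.out.println(from.compareTo(to) > 0); // prints true
        }
    }
}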

Example 57 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project.

From the class AbstractKeyValueStoreTest, method shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo.

@Test
public void shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        try (final KeyValueIterator<Integer, String> iterator = store.range(2, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to." +
                " This may be due to range arguments set in the wrong order, " +
                "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
                " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 58 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project.

From the class AbstractKeyValueStoreTest, method shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo.

@Test
public void shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        try (final KeyValueIterator<Integer, String> iterator = store.reverseRange(2, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to." +
                " This may be due to range arguments set in the wrong order, " +
                "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
                " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 59 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project.

From the class AbstractSessionBytesStoreTest, method shouldLogAndMeasureExpiredRecords.

@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final SessionStore<String, Long> sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long());
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setTime(1L);
    context.setSystemTimeMs(time.milliseconds());
    sessionStore.init((StateStoreContext) context, sessionStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting a record whose timestamp is large enough that records with timestamp 0 are expired.
        // Note that RocksDB only expires whole segments at a time (segment interval = 60,000 for this retention period).
        sessionStore.put(new Windowed<>("initial record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);
        // Try inserting a record with timestamp 0 -- should be dropped
        sessionStore.put(new Windowed<>("late record", new SessionWindow(0, 0)), 0L);
        sessionStore.put(new Windowed<>("another on-time record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal;
    final Metric dropRate;
    dropTotal = metrics.get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    dropRate = metrics.get(new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    sessionStore.close();
}
Also used : Time(org.apache.kafka.common.utils.Time) SystemTime(org.apache.kafka.common.utils.SystemTime) Properties(java.util.Properties) MetricName(org.apache.kafka.common.MetricName) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Metric(org.apache.kafka.common.Metric) SessionWindow(org.apache.kafka.streams.kstream.internals.SessionWindow) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) StreamsConfig(org.apache.kafka.streams.StreamsConfig) SystemTime(org.apache.kafka.common.utils.SystemTime) Test(org.junit.Test)
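The same dropped-records metrics asserted above are exposed by KafkaStreams#metrics() at runtime. A minimal sketch of reading them outside of a test, assuming a running KafkaStreams instance; the helper class and method names are hypothetical:

import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.streams.KafkaStreams;

final class DroppedRecordsSketch {
    // Prints every per-task dropped-records-total metric of a running KafkaStreams instance.
    static void printDroppedRecords(final KafkaStreams streams) {
        final Map<MetricName, ? extends Metric> metrics = streams.metrics();
        for (final Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
            final MetricName name = entry.getKey();
            if ("stream-task-metrics".equals(name.group()) && "dropped-records-total".equals(name.name())) {
                System.out.println(name.tags() + " -> " + entry.getValue().metricValue());
            }
        }
    }
}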

Example 60 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in the Apache Kafka project.

From the class StreamThreadTest, method shouldLogAndRecordSkippedRecordsForInvalidTimestamps.

@Test
public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final Properties config = configProps(false);
    config.setProperty(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, LogAndSkipOnInvalidTimestamp.class.getName());
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(config), false);
    thread.setState(StreamThread.State.STARTING);
    thread.setState(StreamThread.State.PARTITIONS_REVOKED);
    final TaskId task1 = new TaskId(0, t1p1.partition());
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    thread.taskManager().handleAssignment(Collections.singletonMap(task1, assignedPartitions), emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(Collections.singleton(t1p1));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    final MetricName skippedTotalMetric = metrics.metricName("skipped-records-total", "stream-metrics", Collections.singletonMap("client-id", thread.getName()));
    final MetricName skippedRateMetric = metrics.metricName("skipped-records-rate", "stream-metrics", Collections.singletonMap("client-id", thread.getName()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordQueue.class)) {
        long offset = -1;
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        thread.runOnce();
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        addRecord(mockConsumer, ++offset);
        thread.runOnce();
        addRecord(mockConsumer, ++offset, 1L);
        addRecord(mockConsumer, ++offset, 1L);
        thread.runOnce();
        final List<String> strings = appender.getMessages();
        final String threadTaskPrefix = "stream-thread [" + Thread.currentThread().getName() + "] task [0_1] ";
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[0] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[1] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[2] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[3] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[4] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
        assertTrue(strings.contains(threadTaskPrefix + "Skipping record due to negative extracted timestamp. " +
            "topic=[topic1] partition=[1] offset=[5] extractedTimestamp=[-1] " +
            "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]"));
    }
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) LogAndSkipOnInvalidTimestamp(org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp) Utils.mkProperties(org.apache.kafka.common.utils.Utils.mkProperties) Properties(java.util.Properties) MetricName(org.apache.kafka.common.MetricName) TopicPartition(org.apache.kafka.common.TopicPartition) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
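The test above enables the drop-and-log behavior through StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG. A minimal sketch of the same configuration in an application; the class name, application id, and bootstrap address are hypothetical placeholders:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp;

final class TimestampExtractorConfigSketch {
    static Properties streamsProperties() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");            // hypothetical application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker address
        // Log and skip records whose extracted timestamp is invalid instead of failing the task.
        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
                  LogAndSkipOnInvalidTimestamp.class.getName());
        return props;
    }
}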

Aggregations

LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender): 66
Test (org.junit.Test): 65
Windowed (org.apache.kafka.streams.kstream.Windowed): 16
Bytes (org.apache.kafka.common.utils.Bytes): 14
Properties (java.util.Properties): 13
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 13
MetricName (org.apache.kafka.common.MetricName): 11
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 10
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10
File (java.io.File): 8
Serdes (org.apache.kafka.common.serialization.Serdes): 8
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 8
TopicPartition (org.apache.kafka.common.TopicPartition): 7
StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils): 7
CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem): 7
Duration (java.time.Duration): 6
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 6
KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp): 6
Consumed (org.apache.kafka.streams.kstream.Consumed): 6