
Example 1 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

The class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics.

@Test
public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() {
    setup(false);
    context.setRecordContext(new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        processor.process(new Record<>(null, "1", 0L));
        assertThat(appender.getEvents().stream().filter(e -> e.getLevel().equals("WARN")).map(Event::getMessage).collect(Collectors.toList()), hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]"));
    }
    assertEquals(1.0, getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue());
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) CoreMatchers.hasItem(org.hamcrest.CoreMatchers.hasItem) TaskMetrics(org.apache.kafka.streams.processor.internals.metrics.TaskMetrics) Stores(org.apache.kafka.streams.state.Stores) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) LogContext(org.apache.kafka.common.utils.LogContext) After(org.junit.After) MetricName(org.apache.kafka.common.MetricName) Serdes(org.apache.kafka.common.serialization.Serdes) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) Aggregator(org.apache.kafka.streams.kstream.Aggregator) Event(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event) Time(org.apache.kafka.common.utils.Time) TestUtils(org.apache.kafka.test.TestUtils) ThreadCache(org.apache.kafka.streams.state.internals.ThreadCache) KeyValue(org.apache.kafka.streams.KeyValue) Collectors(java.util.stream.Collectors) List(java.util.List) Metrics(org.apache.kafka.common.metrics.Metrics) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) Assert.assertFalse(org.junit.Assert.assertFalse) Matchers.greaterThan(org.hamcrest.Matchers.greaterThan) Duration.ofMillis(java.time.Duration.ofMillis) StreamsConfig(org.apache.kafka.streams.StreamsConfig) SessionWindows(org.apache.kafka.streams.kstream.SessionWindows) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) ArrayList(java.util.ArrayList) Initializer(org.apache.kafka.streams.kstream.Initializer) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) Windowed(org.apache.kafka.streams.kstream.Windowed) Record(org.apache.kafka.streams.processor.api.Record) Processor(org.apache.kafka.streams.processor.api.Processor) SessionStore(org.apache.kafka.streams.state.SessionStore) MockRecordCollector(org.apache.kafka.test.MockRecordCollector) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) StreamsTestUtils.getMetricByName(org.apache.kafka.test.StreamsTestUtils.getMetricByName) Before(org.junit.Before) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) Merger(org.apache.kafka.streams.kstream.Merger) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) StoreBuilder(org.apache.kafka.streams.state.StoreBuilder) KeyValueTimestamp(org.apache.kafka.streams.KeyValueTimestamp) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) Assert.assertEquals(org.junit.Assert.assertEquals) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Event(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event) Test(org.junit.Test)

Example 2 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

The class KGroupedStreamImplTest, method shouldLogAndMeasureSkipsInAggregate.

@Test
public void shouldLogAndMeasureSkipsInAggregate() {
    groupedStream.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count").withKeySerde(Serdes.String()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamAggregate.class);
        final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
        assertThat(appender.getMessages(), hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] " + "offset=[6]"));
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Test(org.junit.Test)
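
Examples 1 and 2 show the two assertion styles LogCaptureAppender offers: getEvents() exposes level and message for level-sensitive checks, while getMessages() returns the plain message strings. Below is a minimal sketch combining both, using only the calls that appear in the examples above; MyLoggingClass is a hypothetical stand-in for the class under test, assumed to log through SLF4J as Kafka's classes do:

import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.MatcherAssert.assertThat;

import java.util.List;
import java.util.stream.Collectors;
import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCaptureAppenderSketchTest {

    // Hypothetical class under test; any class logging through SLF4J can be captured the same way.
    private static class MyLoggingClass {
        private static final Logger LOG = LoggerFactory.getLogger(MyLoggingClass.class);

        void doWork() {
            LOG.warn("something was skipped");
        }
    }

    @Test
    public void shouldCaptureWarningsFromClassUnderTest() {
        // Registering for MyLoggingClass attaches a capturing appender to its logger;
        // try-with-resources detaches it again when the block ends.
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(MyLoggingClass.class)) {
            new MyLoggingClass().doWork();

            // Style of Example 2: assert directly on the message strings.
            assertThat(appender.getMessages(), hasItem("something was skipped"));

            // Style of Example 1: filter by level before asserting on the messages.
            final List<String> warnings = appender.getEvents().stream()
                .filter(event -> event.getLevel().equals("WARN"))
                .map(Event::getMessage)
                .collect(Collectors.toList());
            assertThat(warnings, hasItem("something was skipped"));
        }
    }
}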

Example 3 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

The class RocksDBTimestampedStoreTest, method shouldMigrateDataFromDefaultToTimestampColumnFamily.

@Test
public void shouldMigrateDataFromDefaultToTimestampColumnFamily() throws Exception {
    prepareOldStore();
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    }
    // approx: 7 entries on old CF, 0 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));
    // get()
    // should be no-op on both CF
    assertThat(rocksDBStore.get(new Bytes("unknown".getBytes())), new IsNull<>());
    // approx: 7 entries on old CF, 0 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));
    // should migrate key1 from old to new CF
    // must return timestamp plus value, i.e., it's not 1 byte but 9 bytes
    assertThat(rocksDBStore.get(new Bytes("key1".getBytes())).length, is(8 + 1));
    // one delete on old CF, one put on new CF
    // approx: 6 entries on old CF, 1 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));
    // put()
    // should migrate key2 from old to new CF with new value
    rocksDBStore.put(new Bytes("key2".getBytes()), "timestamp+22".getBytes());
    // one delete on old CF, one put on new CF
    // approx: 5 entries on old CF, 2 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));
    // should delete key3 from old and new CF
    rocksDBStore.put(new Bytes("key3".getBytes()), null);
    // count is off by one due to the two delete operations (even if one of them does not delete anything)
    // approx: 4 entries on old CF, 1 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should add new key8 to new CF
    rocksDBStore.put(new Bytes("key8".getBytes()), "timestamp+88888888".getBytes());
    // one delete on old CF, one put on new CF
    // approx: 3 entries on old CF, 2 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // putIfAbsent()
    // should migrate key4 from old to new CF with old value
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key4".getBytes()), "timestamp+4444".getBytes()).length, is(8 + 4));
    // one delete on old CF, one put on new CF
    // approx: 2 entries on old CF, 3 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should add new key11 to new CF
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key11".getBytes()), "timestamp+11111111111".getBytes()), new IsNull<>());
    // one delete on old CF, one put on new CF
    // approx: 1 entry on old CF, 4 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should not delete key5 but migrate to new CF
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key5".getBytes()), null).length, is(8 + 5));
    // one delete on old CF, one put on new CF
    // approx: 0 entries on old CF, 5 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should be no-op on both CF
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key12".getBytes()), null), new IsNull<>());
    // two delete operations; however, only one is counted because the old CF count was already zero
    // approx: 0 entries on old CF, 4 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(4L));
    // delete()
    // should delete key6 from old and new CF
    assertThat(rocksDBStore.delete(new Bytes("key6".getBytes())).length, is(8 + 6));
    // two delete operations; however, only one is counted because the old CF count was already zero
    // approx: 0 entries on old CF, 3 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(3L));
    iteratorsShouldNotMigrateData();
    assertThat(rocksDBStore.approximateNumEntries(), is(3L));
    rocksDBStore.close();
    verifyOldAndNewColumnFamily();
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)
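
The length assertions in Example 3 (8 + 1, 8 + 4, and so on) reflect the value layout of the timestamped column family: an 8-byte timestamp prefixed to the plain value bytes. Below is a minimal sketch of composing and decomposing such a value, assuming a big-endian long prefix; the helper names are illustrative and not part of the Kafka API:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public final class TimestampedValueSketch {

    // Prefix the raw value with an 8-byte timestamp, mirroring how the migrated column family stores entries.
    static byte[] toTimestampedValue(final long timestamp, final byte[] plainValue) {
        return ByteBuffer.allocate(8 + plainValue.length)
            .putLong(timestamp)
            .put(plainValue)
            .array();
    }

    // Read the timestamp back out of a stored value.
    static long timestampOf(final byte[] timestampedValue) {
        return ByteBuffer.wrap(timestampedValue).getLong();
    }

    // Strip the 8-byte prefix to recover the plain value.
    static byte[] plainValueOf(final byte[] timestampedValue) {
        final byte[] plain = new byte[timestampedValue.length - 8];
        System.arraycopy(timestampedValue, 8, plain, 0, plain.length);
        return plain;
    }

    public static void main(final String[] args) {
        final byte[] stored = toTimestampedValue(42L, "1".getBytes(StandardCharsets.UTF_8));
        // Matches the assertion in Example 3: a 1-byte value becomes 8 + 1 = 9 bytes once migrated.
        System.out.println(stored.length); // 9
        System.out.println(timestampOf(stored)); // 42
    }
}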

Example 4 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

The class RocksDBTimestampedStoreTest, method shouldOpenNewStoreInRegularMode.

@Test
public void shouldOpenNewStoreInRegularMode() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
    try (final KeyValueIterator<Bytes, byte[]> iterator = rocksDBStore.all()) {
        assertThat(iterator.hasNext(), is(false));
    }
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 5 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

The class RocksDBTimestampedStoreTest, method verifyOldAndNewColumnFamily.

private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions), new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes()).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes()).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes()).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes()).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes()).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes()).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes()).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
    } catch (final RuntimeException fatal) {
        errorOccurred = true;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }
    // check that still in upgrade mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }
    // clear old CF
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes());
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
    // check that still in regular mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
Also used : RocksDB(org.rocksdb.RocksDB) ArrayList(java.util.ArrayList) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) DBOptions(org.rocksdb.DBOptions) File(java.io.File)
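
The finally blocks in Example 5 encode the close-ordering rule from the inline comments: ColumnFamilyHandle before RocksDB, RocksDB before DBOptions, DBOptions before ColumnFamilyOptions. A minimal, null-safe helper that applies that ordering is sketched below; the name is illustrative and not RocksDB or Kafka API, and note that the test above deliberately keeps its options open between reopenings, so a blanket helper like this only fits the final cleanup path:

import java.util.List;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

final class RocksDbCloseOrder {

    // Close in the required order: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions.
    // Null-safe so it can be called from a finally block even if open() failed part-way.
    static void closeInOrder(final List<ColumnFamilyHandle> handles,
                             final RocksDB db,
                             final DBOptions dbOptions,
                             final ColumnFamilyOptions columnFamilyOptions) {
        if (handles != null) {
            for (final ColumnFamilyHandle handle : handles) {
                if (handle != null) {
                    handle.close();
                }
            }
        }
        if (db != null) {
            db.close();
        }
        if (dbOptions != null) {
            dbOptions.close();
        }
        if (columnFamilyOptions != null) {
            columnFamilyOptions.close();
        }
    }
}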

Aggregations

LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender): 66
Test (org.junit.Test): 65
Windowed (org.apache.kafka.streams.kstream.Windowed): 16
Bytes (org.apache.kafka.common.utils.Bytes): 14
Properties (java.util.Properties): 13
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 13
MetricName (org.apache.kafka.common.MetricName): 11
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 10
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10
File (java.io.File): 8
Serdes (org.apache.kafka.common.serialization.Serdes): 8
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 8
TopicPartition (org.apache.kafka.common.TopicPartition): 7
StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils): 7
CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem): 7
Duration (java.time.Duration): 6
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 6
KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp): 6
Consumed (org.apache.kafka.streams.kstream.Consumed): 6