Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
The class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics.
@Test
public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() {
    setup(false);
    context.setRecordContext(new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        processor.process(new Record<>(null, "1", 0L));
        assertThat(
            appender.getEvents().stream()
                .filter(e -> e.getLevel().equals("WARN"))
                .map(Event::getMessage)
                .collect(Collectors.toList()),
            hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]")
        );
    }
    assertEquals(
        1.0,
        getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue()
    );
}
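All of the examples on this page share the same capture pattern: the appender is created and registered in a try-with-resources block, so it is detached from the logger again even if an assertion fails. A minimal sketch of the pattern; the class under test (MyProcessor) and the expected message are placeholders, not Kafka code:

// Minimal sketch of the shared pattern; MyProcessor and the message
// are illustrative placeholders.
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(MyProcessor.class)) {
    // exercise the code path that is expected to log
    assertThat(appender.getMessages(), hasItem("expected log line"));
}

Note that getMessages() returns only the message strings, while getEvents(), as used above, also exposes the log level so the assertion can be restricted to WARN records.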
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
The class KGroupedStreamImplTest, method shouldLogAndMeasureSkipsInAggregate.
@Test
public void shouldLogAndMeasureSkipsInAggregate() {
    groupedStream.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count").withKeySerde(Serdes.String()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamAggregate.class);
         final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
        assertThat(
            appender.getMessages(),
            hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] offset=[6]")
        );
    }
}
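The processData helper is not shown in this excerpt; the asserted offset implies that the seventh record piped through the driver (offset 6) has a null key or value. A hypothetical reconstruction, assuming topic name "topic" and String serializers:

// Hypothetical processData() helper, for illustration only.
private void processData(final TopologyTestDriver driver) {
    final TestInputTopic<String, String> input =
        driver.createInputTopic("topic", new StringSerializer(), new StringSerializer());
    for (int i = 0; i < 6; i++) {
        input.pipeInput("key" + i, String.valueOf(i)); // offsets 0..5
    }
    input.pipeInput("key6", null); // offset 6: null value is logged and dropped
}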
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
The class RocksDBTimestampedStoreTest, method shouldMigrateDataFromDefaultToTimestampColumnFamily.
@Test
public void shouldMigrateDataFromDefaultToTimestampColumnFamily() throws Exception {
    prepareOldStore();
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    }
    // approx: 7 entries in old CF, 0 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));

    // get()
    // should be a no-op on both CFs
    assertThat(rocksDBStore.get(new Bytes("unknown".getBytes())), new IsNull<>());
    // approx: 7 entries in old CF, 0 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));
    // should migrate key1 from old to new CF;
    // must return timestamp plus value, i.e., not 1 byte but 9 bytes
    assertThat(rocksDBStore.get(new Bytes("key1".getBytes())).length, is(8 + 1));
    // one delete on old CF, one put on new CF
    // approx: 6 entries in old CF, 1 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));

    // put()
    // should migrate key2 from old to new CF with the new value
    rocksDBStore.put(new Bytes("key2".getBytes()), "timestamp+22".getBytes());
    // one delete on old CF, one put on new CF
    // approx: 5 entries in old CF, 2 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(7L));
    // should delete key3 from both old and new CF
    rocksDBStore.put(new Bytes("key3".getBytes()), null);
    // count is off by one due to two delete operations (even though one deletes nothing)
    // approx: 4 entries in old CF, 1 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should add new key8 to new CF
    rocksDBStore.put(new Bytes("key8".getBytes()), "timestamp+88888888".getBytes());
    // one delete on old CF, one put on new CF
    // approx: 3 entries in old CF, 2 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));

    // putIfAbsent()
    // should migrate key4 from old to new CF, keeping the old value
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key4".getBytes()), "timestamp+4444".getBytes()).length, is(8 + 4));
    // one delete on old CF, one put on new CF
    // approx: 2 entries in old CF, 3 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should add new key11 to new CF
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key11".getBytes()), "timestamp+11111111111".getBytes()), new IsNull<>());
    // one delete on old CF, one put on new CF
    // approx: 1 entry in old CF, 4 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should not delete key5, but migrate it to new CF
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key5".getBytes()), null).length, is(8 + 5));
    // one delete on old CF, one put on new CF
    // approx: 0 entries in old CF, 5 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(5L));
    // should be a no-op on both CFs
    assertThat(rocksDBStore.putIfAbsent(new Bytes("key12".getBytes()), null), new IsNull<>());
    // two delete operations; however, only one is counted because the old CF count was already zero
    // approx: 0 entries in old CF, 4 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(4L));

    // delete()
    // should delete key6 from both old and new CF
    assertThat(rocksDBStore.delete(new Bytes("key6".getBytes())).length, is(8 + 6));
    // two delete operations; however, only one is counted because the old CF count was already zero
    // approx: 0 entries in old CF, 3 in new CF
    assertThat(rocksDBStore.approximateNumEntries(), is(3L));

    iteratorsShouldNotMigrateData();
    assertThat(rocksDBStore.approximateNumEntries(), is(3L));

    rocksDBStore.close();

    verifyOldAndNewColumnFamily();
}
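The accounting in the comments above follows from lazy migration: any operation that finds a key in the old column family deletes it there and, unless the operation is itself a delete, writes the timestamped format to the new column family. A hedged sketch of the read path; this is inferred from the test comments, not the actual RocksDBTimestampedStore code, and assumes db plus the two column-family handles are in scope:

// Sketch of lazy migration on get(); an inference from the comments above,
// not the real implementation (which lives in an internal accessor class).
byte[] getAndMaybeMigrate(final byte[] key) throws RocksDBException {
    final byte[] withTimestamp = db.get(withTimestampColumnFamily, key);
    if (withTimestamp != null) {
        return withTimestamp; // already migrated: 8-byte timestamp + plain value
    }
    final byte[] plain = db.get(noTimestampColumnFamily, key);
    if (plain == null) {
        return null; // unknown key: no-op on both column families
    }
    // migrate: prepend a "timestamp unknown" marker (-1) and move the entry over,
    // which explains the "8 + n" length assertions and the per-access delete/put counts
    final byte[] migrated = ByteBuffer.allocate(8 + plain.length).putLong(-1L).put(plain).array();
    db.put(withTimestampColumnFamily, key, migrated);
    db.delete(noTimestampColumnFamily, key);
    return migrated;
}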
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
The class RocksDBTimestampedStoreTest, method shouldOpenNewStoreInRegularMode.
@Test
public void shouldOpenNewStoreInRegularMode() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
    try (final KeyValueIterator<Bytes, byte[]> iterator = rocksDBStore.all()) {
        assertThat(iterator.hasNext(), is(false));
    }
}
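Together with the migration test above, this pins down how the store chooses its mode on init: leftover data in the legacy default column family triggers upgrade mode, while an empty (or fresh) one yields regular mode. A sketch of that decision; the actual check is internal to RocksDBTimestampedStore, so treat the details as an assumption:

// Assumed shape of the mode decision on open, simplified for illustration.
try (final RocksIterator iter = db.newIterator(noTimestampColumnFamily)) {
    iter.seekToFirst();
    if (iter.isValid()) {
        log.info("Opening store {} in upgrade mode", name); // legacy data present
    } else {
        log.info("Opening store {} in regular mode", name);
    }
}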
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
The class RocksDBTimestampedStoreTest, method verifyOldAndNewColumnFamily.
private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(
            dbOptions,
            new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(),
            columnFamilyDescriptors,
            columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);

        // the old column family should only hold the untouched key7, in plain format
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes()).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes()), new IsNull<>());

        // the new column family should hold all migrated or newly written keys
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes()).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes()).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes()).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes()).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes()).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes()).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
    } catch (final RuntimeException fatal) {
        errorOccurred = true;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }

    // check that the store is still in upgrade mode
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }

    // clear the old CF by deleting its last remaining entry
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(
            dbOptions,
            new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(),
            columnFamilyDescriptors,
            columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes());
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }

    // with the old CF now empty, the store should open in regular mode
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
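For quick reference, the expected location of every key after the migration test, exactly as asserted above:

// default CF (plain values):  key7 only (7 bytes; never touched by the test)
// timestamped CF:             key1 (9), key2 (12), key4 (12), key5 (13), key8 (18), key11 (21)
// absent from both CFs:       unknown, key3, key6, key12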