Example 6 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class RocksDBTimestampedStoreTest, method verifyOldAndNewColumnFamily: opens the store's RocksDB files directly to check which keys live in the old and new column families, and uses LogCaptureAppender to confirm whether the store reopens in upgrade or regular mode.

private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions), new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
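        // Only key7 should still be in the old (no-timestamp) column family. Every
        // other surviving key should be readable only from the new
        // "keyValueWithTimestamp" column family, where each value is prefixed with
        // an 8-byte timestamp (hence the 8 + n expected lengths below).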
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes()).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes()).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes()).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes()).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes()).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes()).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes()).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
    } catch (final RuntimeException fatal) {
        errorOccurred = true;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }
    // check that the store still opens in upgrade mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }
    // delete the only remaining key from the old column family so the upgrade can complete
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes());
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
    // check that the store now opens in regular mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
Also used : RocksDB(org.rocksdb.RocksDB) ArrayList(java.util.ArrayList) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) DBOptions(org.rocksdb.DBOptions) File(java.io.File)

Example 7 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class AbstractWindowBytesStoreTest, method shouldLogAndMeasureExpiredRecords: verifies that a record arriving past the retention period is dropped, logged, and counted in the dropped-records metrics.

@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final WindowStore<Integer, String> windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    context.setTime(1L);
    windowStore.init((StateStoreContext) context, windowStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting a record with a timestamp large enough that records with timestamp 0 are expired
        windowStore.put(1, "initial record", 2 * RETENTION_PERIOD);
        // Try inserting a record with timestamp 0 -- should be dropped
        windowStore.put(1, "late record", 0L);
        windowStore.put(1, "another on-time record", RETENTION_PERIOD + 1);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
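    // The dropped record should also be counted by the task-level
    // dropped-records metrics.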
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal;
    final Metric dropRate;
    dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    windowStore.close();
}
Also used : Time(org.apache.kafka.common.utils.Time) SystemTime(org.apache.kafka.common.utils.SystemTime) Properties(java.util.Properties) MetricName(org.apache.kafka.common.MetricName) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Metric(org.apache.kafka.common.Metric) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)

Example 8 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class AbstractKeyValueStoreTest, method shouldNotThrowInvalidRangeExceptionWithNegativeFromKey: verifies that an inverted key range returns an empty iterator and logs a warning instead of throwing.

@Test
public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() {
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
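        // With the built-in integer serde, -1 serializes to bytes that compare
        // greater than those of 1, so range(-1, 1) is an inverted range: the store
        // should log a warning and return an empty iterator instead of throwing.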
        try (final KeyValueIterator<Integer, String> iterator = store.range(-1, 1)) {
            assertFalse(iterator.hasNext());
        }
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 9 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class AbstractSessionBytesStoreTest, method shouldNotThrowInvalidRangeExceptionWithNegativeFromKey: the session-store variant of the same inverted-range check.

@Test
public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() {
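    // Serialize the ints -1 and 1, then reinterpret those bytes as Strings: the
    // resulting keys compare in reverse order, yielding a from > to range.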
    final String keyFrom = Serdes.String().deserializer().deserialize("", Serdes.Integer().serializer().serialize("", -1));
    final String keyTo = Serdes.String().deserializer().deserialize("", Serdes.Integer().serializer().serialize("", 1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister();
        final KeyValueIterator<Windowed<String>, Long> iterator = sessionStore.findSessions(keyFrom, keyTo, 0L, 10L)) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : Windowed(org.apache.kafka.streams.kstream.Windowed) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 10 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class AdjustStreamThreadCountTest, method shouldResizeCacheAfterThreadReplacement: verifies that the cache is re-split across the remaining threads after a failed stream thread is replaced.

@Test
public void shouldResizeCacheAfterThreadReplacement() throws InterruptedException {
    final long totalCacheBytes = 10L;
    final Properties props = new Properties();
    props.putAll(properties);
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, totalCacheBytes);
    final AtomicBoolean injectError = new AtomicBoolean(false);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream(inputTopic);
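    // A pass-through transformer whose wall-clock punctuator throws once on
    // StreamThread-1 while the error flag is set, killing that thread so the
    // uncaught-exception handler can replace it.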
    stream.transform(() -> new Transformer<String, String, KeyValue<String, String>>() {

        @Override
        public void init(final ProcessorContext context) {
            context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> {
                if (Thread.currentThread().getName().endsWith("StreamThread-1") && injectError.get()) {
                    injectError.set(false);
                    throw new RuntimeException("BOOM");
                }
            });
        }

        @Override
        public KeyValue<String, String> transform(final String key, final String value) {
            return new KeyValue<>(key, value);
        }

        @Override
        public void close() {
        }
    });
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) {
        addStreamStateChangeListener(kafkaStreams);
        kafkaStreams.setUncaughtExceptionHandler(e -> StreamThreadExceptionResponse.REPLACE_THREAD);
        startStreamsAndWaitForRunning(kafkaStreams);
        stateTransitionHistory.clear();
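        // Trigger the injected failure, then wait for the replacement thread;
        // with 2 live threads the 10-byte cache should be re-split to 5 bytes each.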
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
            injectError.set(true);
            waitForCondition(() -> !injectError.get(), "StreamThread did not hit and reset the injected error");
            waitForTransitionFromRebalancingToRunning();
            for (final String log : appender.getMessages()) {
                // after we replace the thread there should be two remaining threads with 5 bytes each
                if (log.endsWith("Adding StreamThread-3, there will now be 2 live threads and the new cache size per thread is 5")) {
                    return;
                }
            }
        }
    }
    fail();
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) After(org.junit.After) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) Assert.fail(org.junit.Assert.fail) AfterClass(org.junit.AfterClass) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkObjectProperties(org.apache.kafka.common.utils.Utils.mkObjectProperties) KeyValue(org.apache.kafka.streams.KeyValue) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) Category(org.junit.experimental.categories.Category) Executors(java.util.concurrent.Executors) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) CountDownLatch(java.util.concurrent.CountDownLatch) IntegrationTestUtils.purgeLocalStreamsState(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.purgeLocalStreamsState) List(java.util.List) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) Optional(java.util.Optional) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) Assert.assertThrows(org.junit.Assert.assertThrows) CoreMatchers.not(org.hamcrest.CoreMatchers.not) IntegrationTest(org.apache.kafka.test.IntegrationTest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) KStream(org.apache.kafka.streams.kstream.KStream) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) PunctuationType(org.apache.kafka.streams.processor.PunctuationType) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) ExecutorService(java.util.concurrent.ExecutorService) Before(org.junit.Before) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Properties(java.util.Properties) Transformer(org.apache.kafka.streams.kstream.Transformer) TestUtils.waitForCondition(org.apache.kafka.test.TestUtils.waitForCondition) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) Assert.assertNull(org.junit.Assert.assertNull) StreamThreadExceptionResponse(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Assert.assertEquals(org.junit.Assert.assertEquals)

Aggregations

LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender): 66 uses
Test (org.junit.Test): 65 uses
Windowed (org.apache.kafka.streams.kstream.Windowed): 16 uses
Bytes (org.apache.kafka.common.utils.Bytes): 14 uses
Properties (java.util.Properties): 13 uses
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 13 uses
MetricName (org.apache.kafka.common.MetricName): 11 uses
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 10 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10 uses
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10 uses
File (java.io.File): 8 uses
Serdes (org.apache.kafka.common.serialization.Serdes): 8 uses
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 8 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 7 uses
StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils): 7 uses
CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem): 7 uses
Duration (java.time.Duration): 6 uses
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 6 uses
KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp): 6 uses
Consumed (org.apache.kafka.streams.kstream.Consumed): 6 uses
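
Across all of these examples the pattern is the same: create and register the appender in a try-with-resources block (optionally scoped to a single class's logger, as in Example 6), exercise the code under test, then assert on appender.getMessages(). Below is a minimal, self-contained sketch of that recurring shape; the Widget class and its log message are placeholders, not taken from the Kafka sources, and the sketch assumes the kafka-streams test artifacts (which contain LogCaptureAppender) and SLF4J are on the classpath:

import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.MatcherAssert.assertThat;

import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCaptureAppenderPatternTest {

    // Hypothetical component standing in for a store, task, or thread under test.
    static class Widget {
        private static final Logger log = LoggerFactory.getLogger(Widget.class);

        void frob() {
            log.warn("expected log line");
        }
    }

    @Test
    public void shouldCaptureLogMessages() {
        // Register the appender for Widget's logger only; the no-argument
        // createAndRegister() overload captures from all loggers instead.
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(Widget.class)) {
            // Run the code that is expected to log.
            new Widget().frob();
            // Every message logged while the appender was registered is captured.
            assertThat(appender.getMessages(), hasItem("expected log line"));
        }
        // Closing the appender (here via try-with-resources) unregisters it,
        // so later log traffic cannot leak into other tests' assertions.
    }
}

Scoping the appender to a single class, as Example 6 does with RocksDBTimestampedStore.class, keeps the assertion immune to unrelated log traffic, which matters most in multi-threaded integration tests like Example 10.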