Example 31 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class CachingPersistentWindowStoreTest, method shouldNotThrowInvalidRangeExceptionWithNegativeFromKey.

@Test
public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() {
    final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
    final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class);
        final KeyValueIterator<Windowed<Bytes>, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to."
                + " This may be due to range arguments set in the wrong order, "
                + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes."
                + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : Windowed(org.apache.kafka.streams.kstream.Windowed) Bytes(org.apache.kafka.common.utils.Bytes) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)
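
The expected message above points at a real pitfall: the built-in numerical serdes serialize negative values with the sign bit set, so an unsigned lexicographic comparison of the raw bytes puts -1 after 1 and the store sees from > to. A minimal standalone sketch of that comparison (a sketch only; the NegativeKeyOrdering class name is invented for illustration, and it uses just Serdes and Bytes from the Kafka clients library):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;

public class NegativeKeyOrdering {
    public static void main(final String[] args) {
        // Build keyFrom and keyTo exactly as the test does.
        final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
        final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
        // -1 serializes to 0xFFFFFFFF and 1 to 0x00000001 (big-endian), so when the bytes are
        // compared lexicographically as unsigned values, keyFrom sorts after keyTo. That is the
        // "from > to" condition the caching store logs about instead of throwing.
        System.out.println(keyFrom.compareTo(keyTo) > 0); // prints true
    }
}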

Example 32 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class CachingPersistentWindowStoreTest, method shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey.

@Test
public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() {
    final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
    final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class);
        final KeyValueIterator<Windowed<Bytes>, byte[]> iterator = cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to."
                + " This may be due to serdes that don't preserve ordering when lexicographically comparing the serialized bytes."
                + " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
Also used : Windowed(org.apache.kafka.streams.kstream.Windowed) Bytes(org.apache.kafka.common.utils.Bytes) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 33 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class TaskManagerTest, method shouldHaveRemainingPartitionsUncleared.

@Test
public void shouldHaveRemainingPartitionsUncleared() {
    final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true);
    final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    task00.setCommittableOffsetsAndMetadata(offsets);
    expectRestoreToBeCompleted(consumer, changeLogReader);
    expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment))).andReturn(singletonList(task00));
    consumer.commitSync(offsets);
    expectLastCall();
    replay(activeTaskCreator, consumer, changeLogReader);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TaskManager.class)) {
        LogCaptureAppender.setClassLoggerToDebug(TaskManager.class);
        taskManager.handleAssignment(taskId00Assignment, emptyMap());
        assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true));
        assertThat(task00.state(), is(Task.State.RUNNING));
        taskManager.handleRevocation(mkSet(t1p0, new TopicPartition("unknown", 0)));
        assertThat(task00.state(), is(Task.State.SUSPENDED));
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "taskManagerTestThe following revoked partitions [unknown-0] are missing "
                + "from the current task partitions. It could potentially be due to race "
                + "condition of consumer detecting the heartbeat failure, or the "
                + "tasks have been cleaned up by the handleAssignment callback."));
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) EasyMock.anyString(org.easymock.EasyMock.anyString) Test(org.junit.Test)
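
These tests share one pattern: open the appender in a try-with-resources block against the class whose logger you want to observe, raise that logger's verbosity if the expected line is below the default INFO level, exercise the code, then assert on appender.getMessages(). A minimal sketch of that pattern, assuming a made-up test class and a nested MyProcessor stand-in that logs the expected line at DEBUG (both names are invented here):

import java.util.List;

import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.MatcherAssert.assertThat;

public class LogCaptureAppenderPatternTest {

    // Hypothetical stand-in for the production class whose logging we want to verify.
    static class MyProcessor {
        private static final Logger log = LoggerFactory.getLogger(MyProcessor.class);

        void process() {
            log.debug("expected debug message");
        }
    }

    @Test
    public void shouldCaptureDebugMessage() {
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(MyProcessor.class)) {
            // DEBUG is below the default INFO level, so raise verbosity before exercising the code.
            LogCaptureAppender.setClassLoggerToDebug(MyProcessor.class);
            new MyProcessor().process();
            final List<String> messages = appender.getMessages();
            assertThat(messages, hasItem("expected debug message"));
        }
    }
}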

Example 34 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class KGroupedStreamImplTest, method shouldLogAndMeasureSkipsInReduce.

@Test
public void shouldLogAndMeasureSkipsInReduce() {
    groupedStream.reduce(
        MockReducer.STRING_ADDER,
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("reduce")
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.String()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamReduce.class);
        final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
        assertThat(appender.getMessages(), hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] " + "offset=[6]"));
    }
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) Test(org.junit.Test)

Example 35 with LogCaptureAppender

Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.

From class PartitionGroupTest, method shouldWaitForPollWhenLagIsNonzero.

@Test
public void shouldWaitForPollWhenLagIsNonzero() {
    final HashMap<TopicPartition, OptionalLong> lags = new HashMap<>();
    final PartitionGroup group = new PartitionGroup(
        logContext,
        mkMap(mkEntry(partition1, queue1), mkEntry(partition2, queue2)),
        tp -> lags.getOrDefault(tp, OptionalLong.empty()),
        getValueSensor(metrics, lastLatenessValue),
        enforcedProcessingSensor,
        0L);
    final List<ConsumerRecord<byte[], byte[]>> list1 = Arrays.asList(
        new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
        new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
    group.addRawRecords(partition1, list1);
    lags.put(partition2, OptionalLong.of(1L));
    assertThat(group.allPartitionsBufferedLocally(), is(false));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) {
        LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class);
        assertThat(group.readyToProcess(0L), is(false));
        assertThat(appender.getEvents(), hasItem(Matchers.allOf(
            Matchers.hasProperty("level", equalTo("TRACE")),
            Matchers.hasProperty("message", equalTo(
                "[test] Lag for topic-2 is currently 1, but no data is buffered locally. Waiting to buffer some records.")))));
    }
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OptionalLong(java.util.OptionalLong) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.Test)
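
Example 35 switches from getMessages() to getEvents(): when the assertion needs the log level as well as the text, each captured event exposes level and message as bean properties, which Hamcrest's hasProperty can match. A minimal sketch of that variant, again with an invented test class and a nested MyProcessor stand-in that logs at TRACE (both names are assumptions, not part of the Kafka sources above):

import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.hamcrest.Matchers;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.MatcherAssert.assertThat;

public class LogCaptureAppenderLevelTest {

    // Hypothetical stand-in for the production class whose TRACE logging we want to verify.
    static class MyProcessor {
        private static final Logger log = LoggerFactory.getLogger(MyProcessor.class);

        void process() {
            log.trace("expected trace message");
        }
    }

    @Test
    public void shouldCaptureTraceEventWithLevel() {
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(MyProcessor.class)) {
            LogCaptureAppender.setClassLoggerToTrace(MyProcessor.class);
            new MyProcessor().process();
            // Match on both the level and the exact message of the captured event.
            assertThat(appender.getEvents(), hasItem(Matchers.allOf(
                Matchers.hasProperty("level", equalTo("TRACE")),
                Matchers.hasProperty("message", equalTo("expected trace message")))));
        }
    }
}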

Aggregations

LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender): 66
Test (org.junit.Test): 65
Windowed (org.apache.kafka.streams.kstream.Windowed): 16
Bytes (org.apache.kafka.common.utils.Bytes): 14
Properties (java.util.Properties): 13
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 13
MetricName (org.apache.kafka.common.MetricName): 11
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 10
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10
File (java.io.File): 8
Serdes (org.apache.kafka.common.serialization.Serdes): 8
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 8
TopicPartition (org.apache.kafka.common.TopicPartition): 7
StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils): 7
CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem): 7
Duration (java.time.Duration): 6
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 6
KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp): 6
Consumed (org.apache.kafka.streams.kstream.Consumed): 6