Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From the class CachingPersistentWindowStoreTest, method shouldNotThrowInvalidRangeExceptionWithNegativeFromKey:
@Test
public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() {
    final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
    final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class);
         final KeyValueIterator<Windowed<Bytes>, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to." +
                " This may be due to range arguments set in the wrong order, " +
                "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
                " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
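This test shows the canonical LogCaptureAppender pattern: register an appender scoped to the class whose output you want to capture, exercise the code, assert on the collected messages, and let try-with-resources unregister the appender. A minimal sketch of that shape, assuming a hypothetical SLF4J-logging class Widget (the class name and log text are illustrative, not from the Kafka code base):

try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(Widget.class)) {
    // Exercise the code path that is expected to log; Widget is a stand-in.
    new Widget().doWork();
    // getMessages() returns every message logged by Widget while the
    // appender was registered; closing the appender unregisters it.
    assertThat(appender.getMessages(), hasItem("expected log line"));
}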
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From the class CachingPersistentWindowStoreTest, method shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey:
@Test
public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() {
    final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
    final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class);
         final KeyValueIterator<Windowed<Bytes>, byte[]> iterator =
             cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) {
        assertFalse(iterator.hasNext());
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "Returning empty iterator for fetch with invalid key range: from > to." +
                " This may be due to serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
                " Note that the built-in numerical serdes do not follow this for negative numbers"));
    }
}
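Both of these tests hinge on the detail called out in the log message: Serdes.Integer() serializes to big-endian two's-complement bytes, so a negative key sorts after a positive one when the serialized bytes are compared lexicographically as unsigned values. A standalone sketch of why -1 lands above 1 in byte order:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;

public class NegativeKeyOrdering {
    public static void main(final String[] args) {
        // -1 serializes to FF FF FF FF; 1 serializes to 00 00 00 01.
        final Bytes from = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
        final Bytes to = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
        // Bytes compares unsigned lexicographically, so "from" sorts after "to",
        // which is exactly the "from > to" condition the store logs about.
        System.out.println(from.compareTo(to) > 0); // prints true
    }
}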
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From the class TaskManagerTest, method shouldHaveRemainingPartitionsUncleared:
@Test
public void shouldHaveRemainingPartitionsUncleared() {
    final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true);
    final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    task00.setCommittableOffsetsAndMetadata(offsets);
    expectRestoreToBeCompleted(consumer, changeLogReader);
    expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment))).andReturn(singletonList(task00));
    consumer.commitSync(offsets);
    expectLastCall();
    replay(activeTaskCreator, consumer, changeLogReader);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TaskManager.class)) {
        LogCaptureAppender.setClassLoggerToDebug(TaskManager.class);
        taskManager.handleAssignment(taskId00Assignment, emptyMap());
        assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true));
        assertThat(task00.state(), is(Task.State.RUNNING));
        taskManager.handleRevocation(mkSet(t1p0, new TopicPartition("unknown", 0)));
        assertThat(task00.state(), is(Task.State.SUSPENDED));
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem(
            "taskManagerTestThe following revoked partitions [unknown-0] are missing " +
                "from the current task partitions. It could potentially be due to race " +
                "condition of consumer detecting the heartbeat failure, or the " +
                "tasks have been cleaned up by the handleAssignment callback."));
    }
}
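Two things worth noting here. First, the fused-looking "taskManagerTestThe following..." in the expected message appears to be the test's log-context prefix prepended directly to the message text, not a typo. Second, the setClassLoggerToDebug call before the code under test runs: messages below the logger's configured level are never emitted, so they cannot be captured. (The expect/replay calls are EasyMock setup from the surrounding test class.) A sketch of this shape against a hypothetical Worker class that logs at DEBUG:

try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(Worker.class)) {
    // Raise the level first; otherwise DEBUG output never reaches the appender.
    LogCaptureAppender.setClassLoggerToDebug(Worker.class);
    new Worker().run();
    assertThat(appender.getMessages(), hasItem("some debug-level line"));
}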
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From the class KGroupedStreamImplTest, method shouldLogAndMeasureSkipsInReduce:
@Test
public void shouldLogAndMeasureSkipsInReduce() {
    groupedStream.reduce(
        MockReducer.STRING_ADDER,
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("reduce")
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.String()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamReduce.class);
         final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
        assertThat(appender.getMessages(), hasItem(
            "Skipping record due to null key or value. topic=[topic] partition=[0] offset=[6]"));
    }
}
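processData is a helper defined elsewhere in KGroupedStreamImplTest; what matters here is that it pipes a record with a null key (or value) into the topology, which is what makes KStreamReduce log the skip. A sketch of how such input could be fed through the driver, assuming the topology reads "topic" with String serdes (this helper body is illustrative, not the actual one):

final TestInputTopic<String, String> input =
    driver.createInputTopic("topic", new StringSerializer(), new StringSerializer());
// A null key (or null value) cannot be aggregated, so the reduce node
// skips the record and logs it instead of processing it.
input.pipeInput(null, "value");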
Use of org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender in project kafka by apache.
From the class PartitionGroupTest, method shouldWaitForPollWhenLagIsNonzero:
@Test
public void shouldWaitForPollWhenLagIsNonzero() {
    final HashMap<TopicPartition, OptionalLong> lags = new HashMap<>();
    final PartitionGroup group = new PartitionGroup(
        logContext,
        mkMap(mkEntry(partition1, queue1), mkEntry(partition2, queue2)),
        tp -> lags.getOrDefault(tp, OptionalLong.empty()),
        getValueSensor(metrics, lastLatenessValue),
        enforcedProcessingSensor,
        0L);
    final List<ConsumerRecord<byte[], byte[]>> list1 = Arrays.asList(
        new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue),
        new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue));
    group.addRawRecords(partition1, list1);
    lags.put(partition2, OptionalLong.of(1L));
    assertThat(group.allPartitionsBufferedLocally(), is(false));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) {
        LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class);
        assertThat(group.readyToProcess(0L), is(false));
        assertThat(appender.getEvents(), hasItem(Matchers.allOf(
            Matchers.hasProperty("level", equalTo("TRACE")),
            Matchers.hasProperty("message", equalTo(
                "[test] Lag for topic-2 is currently 1, but no data is buffered locally. Waiting to buffer some records.")))));
    }
}
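Unlike the earlier tests, this one asserts through getEvents() rather than getMessages(): captured events carry the log level, so the assertion can pin the message to TRACE specifically (after raising the logger's level with setClassLoggerToTrace, for the same reason as the DEBUG case above). A sketch of the same assertion shape against a hypothetical Fetcher class:

try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(Fetcher.class)) {
    LogCaptureAppender.setClassLoggerToTrace(Fetcher.class);
    new Fetcher().poll();
    // Each captured event exposes getLevel() and getMessage() as bean
    // properties, which is what lets Matchers.hasProperty match on them.
    assertThat(appender.getEvents(), hasItem(Matchers.allOf(
        Matchers.hasProperty("level", equalTo("TRACE")),
        Matchers.hasProperty("message", equalTo("expected trace line")))));
}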