Use of org.apache.kafka.streams.errors.LogAndContinueExceptionHandler in project kafka by apache.
From the class RecordQueueTest, method shouldThrowOnNegativeTimestamp:
@Test
public void shouldThrowOnNegativeTimestamp() {
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME,
            0, 0, recordKey, recordValue, new RecordHeaders(), Optional.empty()));
    final RecordQueue queue = new RecordQueue(
        new TopicPartition("topic", 1),
        mockSourceNodeWithMetrics,
        new FailOnInvalidTimestamp(),
        new LogAndContinueExceptionHandler(),
        new InternalMockProcessorContext(),
        new LogContext());
    final StreamsException exception = assertThrows(StreamsException.class, () -> queue.addRawRecords(records));
    assertThat(exception.getMessage(), equalTo(
        "Input record ConsumerRecord(topic = topic, partition = 1, leaderEpoch = null, offset = 1, " +
        "CreateTime = -1, serialized key size = 0, serialized value size = 0, " +
        "headers = RecordHeaders(headers = [], isReadOnly = false), key = 1, value = 10) has invalid (negative) " +
        "timestamp. Possibly because a pre-0.10 producer client was used to write this record to Kafka without " +
        "embedding a timestamp, or because the input topic was created before upgrading the Kafka cluster to 0.10+. " +
        "Use a different TimestampExtractor to process this data."));
}
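Applications do not normally construct a RecordQueue directly; the same two handlers are selected through StreamsConfig. A minimal sketch, assuming a hypothetical application id and broker address (neither is taken from the test above):

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "timestamp-demo"); // hypothetical id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
// Fail fast on records carrying an invalid (negative) timestamp, as exercised above.
props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, FailOnInvalidTimestamp.class);
// Log and skip records that cannot be deserialized instead of failing the task.
props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndContinueExceptionHandler.class);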
Use of org.apache.kafka.streams.errors.LogAndContinueExceptionHandler in project kafka by apache.
From the class RecordQueueTest, method shouldDropOnNegativeTimestamp:
@Test
public void shouldDropOnNegativeTimestamp() {
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME,
            0, 0, recordKey, recordValue, new RecordHeaders(), Optional.empty()));
    final RecordQueue queue = new RecordQueue(
        new TopicPartition("topic", 1),
        mockSourceNodeWithMetrics,
        new LogAndSkipOnInvalidTimestamp(),
        new LogAndContinueExceptionHandler(),
        new InternalMockProcessorContext(),
        new LogContext());
    queue.addRawRecords(records);
    assertEquals(0, queue.size());
}
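LogAndSkipOnInvalidTimestamp is one of the built-in TimestampExtractor implementations. For comparison, a hedged sketch of a custom extractor with a different policy (the class name and fallback behavior are illustrative, not part of the test):

public class FallbackTimestampExtractor implements TimestampExtractor {
    @Override
    public long extract(final ConsumerRecord<Object, Object> record, final long partitionTime) {
        final long timestamp = record.timestamp();
        // Substitute the current partition time for an invalid (negative) timestamp
        // instead of dropping the record as LogAndSkipOnInvalidTimestamp does.
        return timestamp >= 0 ? timestamp : partitionTime;
    }
}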
Use of org.apache.kafka.streams.errors.LogAndContinueExceptionHandler in project kafka by apache.
From the class GlobalStateTaskTest, method shouldNotThrowStreamsExceptionWhenValueDeserializationFails:
@Test
public void shouldNotThrowStreamsExceptionWhenValueDeserializationFails() {
    final GlobalStateUpdateTask globalStateTask2 = new GlobalStateUpdateTask(
        logContext, topology, context, stateMgr, new LogAndContinueExceptionHandler());
    final byte[] key = new IntegerSerializer().serialize(topic2, 1);
    final byte[] recordValue = new LongSerializer().serialize(topic2, 10L);
    maybeDeserialize(globalStateTask2, key, recordValue, false);
}
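The DeserializationExceptionHandler contract behind LogAndContinueExceptionHandler can also be implemented directly. An illustrative sketch mirroring its log-and-continue behavior (the class name is hypothetical; the handle signature shown is the one this version of the codebase uses):

public class MyLogAndContinueHandler implements DeserializationExceptionHandler {
    private static final Logger log = LoggerFactory.getLogger(MyLogAndContinueHandler.class);

    @Override
    public DeserializationHandlerResponse handle(final ProcessorContext context,
                                                 final ConsumerRecord<byte[], byte[]> record,
                                                 final Exception exception) {
        // Log the poison pill and tell the task to keep processing.
        log.warn("Skipping record at topic {} partition {} offset {} due to deserialization error",
            record.topic(), record.partition(), record.offset(), exception);
        return DeserializationHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // No configuration required for this handler.
    }
}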
Use of org.apache.kafka.streams.errors.LogAndContinueExceptionHandler in project kafka by apache.
From the class TopologyTestDriver, method setupGlobalTask:
private void setupGlobalTask(final Time mockWallClockTime,
                             final StreamsConfig streamsConfig,
                             final StreamsMetricsImpl streamsMetrics,
                             final ThreadCache cache) {
    if (globalTopology != null) {
        final MockConsumer<byte[], byte[]> globalConsumer = new MockConsumer<>(OffsetResetStrategy.NONE);
        for (final String topicName : globalTopology.sourceTopics()) {
            final TopicPartition partition = new TopicPartition(topicName, 0);
            globalPartitionsByInputTopic.put(topicName, partition);
            offsetsByTopicOrPatternPartition.put(partition, new AtomicLong());
            globalConsumer.updatePartitions(topicName,
                Collections.singletonList(new PartitionInfo(topicName, 0, null, null, null)));
            globalConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));
            globalConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
        }
        globalStateManager = new GlobalStateManagerImpl(
            logContext, mockWallClockTime, globalTopology, globalConsumer,
            stateDirectory, stateRestoreListener, streamsConfig);
        final GlobalProcessorContextImpl globalProcessorContext = new GlobalProcessorContextImpl(
            streamsConfig, globalStateManager, streamsMetrics, cache, mockWallClockTime);
        globalStateManager.setGlobalProcessorContext(globalProcessorContext);
        globalStateTask = new GlobalStateUpdateTask(
            logContext, globalTopology, globalProcessorContext, globalStateManager,
            new LogAndContinueExceptionHandler());
        globalStateTask.initialize();
        globalProcessorContext.setRecordContext(null);
    } else {
        globalStateManager = null;
        globalStateTask = null;
    }
}
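From the public API, this wiring can be exercised by piping a record into a global table and reading it back from the global store. A sketch with illustrative topic, store, and config names (props is assumed to hold the usual test driver properties):

final StreamsBuilder builder = new StreamsBuilder();
builder.globalTable("global-input",
    Consumed.with(Serdes.String(), Serdes.String()),
    Materialized.as("global-store"));
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
    final TestInputTopic<String, String> input =
        driver.createInputTopic("global-input", new StringSerializer(), new StringSerializer());
    input.pipeInput("key", "value");
    // The GlobalStateUpdateTask set up above applies the record to the global store.
    assertEquals("value", driver.getKeyValueStore("global-store").get("key"));
}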