Use of org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler in the apache-kafka-on-k8s project by banzaicloud.
From the class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler:
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    // Producer stub whose send() always reports a failure through the completion callback.
    final RecordCollector collector = new RecordCollectorImpl(
        new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
            @Override
            public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                callback.onCompletion(null, new Exception());
                return null;
            }
        },
        "test", logContext, new AlwaysContinueProductionExceptionHandler());
    // Neither send() throws: the CONTINUE handler swallows the production error.
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
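For context, the handler exercised above simply tells the record collector to keep processing after a send error. Below is a minimal sketch of what such an always-continue handler can look like, written against the public ProductionExceptionHandler contract; the class name ContinueOnAllErrorsHandler is illustrative and not Kafka's own test implementation.

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Illustrative re-creation: every production error is swallowed and processing
// continues; the offsets of failed records are simply not recorded.
public class ContinueOnAllErrorsHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }
}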
Use of org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler in the kafka project by apache.
From the class RecordCollectorTest, method shouldThrowStreamsExceptionOnSubsequentFlushIfFatalEvenWithContinueExceptionHandler:
@Test
public void shouldThrowStreamsExceptionOnSubsequentFlushIfFatalEvenWithContinueExceptionHandler() {
    final KafkaException exception = new AuthenticationException("KABOOM!");
    // Fatal errors are re-thrown on flush() even though the handler would CONTINUE.
    final RecordCollector collector = new RecordCollectorImpl(
        logContext, taskId, getExceptionalStreamsProducerOnSend(exception),
        new AlwaysContinueProductionExceptionHandler(), streamsMetrics);
    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    final StreamsException thrown = assertThrows(StreamsException.class, collector::flush);
    assertEquals(exception, thrown.getCause());
    assertThat(thrown.getMessage(), equalTo(
        "Error encountered sending record to topic topic for task 0_0 due to:" +
        "\norg.apache.kafka.common.errors.AuthenticationException: KABOOM!" +
        "\nWritten offsets would not be recorded and no more records would be sent since this is a fatal error."));
}
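As this test demonstrates, a CONTINUE response does not rescue fatal errors such as AuthenticationException: the collector still fails on the next flush. In practice a custom handler is usually selective rather than unconditional. The sketch below assumes only the public ProductionExceptionHandler contract; the class name ContinueOnRecordTooLargeHandler is illustrative only.

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Illustrative selective handler: skip only oversized records, fail on everything else.
public class ContinueOnRecordTooLargeHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        return exception instanceof RecordTooLargeException
            ? ProductionExceptionHandlerResponse.CONTINUE
            : ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }
}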
Use of org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler in the kafka project by apache.
From the class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler:
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(
        logContext, taskId, getExceptionalStreamsProducerOnSend(new Exception()),
        new AlwaysContinueProductionExceptionHandler(), streamsMetrics);
    try (final LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(RecordCollectorImpl.class)) {
        collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
        collector.flush();
        final List<String> messages = logCaptureAppender.getMessages();
        final StringBuilder errorMessage = new StringBuilder("Messages received:");
        for (final String error : messages) {
            errorMessage.append("\n - ").append(error);
        }
        // The last log line records that the handler chose to CONTINUE despite the send error.
        assertTrue(errorMessage.toString(), messages.get(messages.size() - 1).endsWith(
            "Exception handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded."));
    }
    // The skipped record is counted in the task-level dropped-records metric.
    final Metric metric = streamsMetrics.metrics().get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "The total number of dropped records",
        mkMap(mkEntry("thread-id", Thread.currentThread().getName()), mkEntry("task-id", taskId.toString()))));
    assertEquals(1.0, metric.metricValue());
    // Subsequent sends, flush, and a clean close still succeed.
    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
    collector.closeClean();
}
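Outside of tests, a production exception handler is not passed to a RecordCollector directly; it is registered through the Streams configuration. A minimal sketch, assuming the standard StreamsConfig keys and reusing the illustrative handler class from the previous sketch; the application id and bootstrap servers are placeholder values.

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class HandlerConfigExample {
    public static Properties streamsConfig() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "handler-demo");        // placeholder value
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder value
        // Register the handler class; Kafka Streams instantiates it and calls configure() on it.
        props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG,
                  ContinueOnRecordTooLargeHandler.class);
        return props;
    }
}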