Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
The class RecordCollectorTest, method shouldThrowStreamsExceptionOnCloseIfASendFailedWithDefaultExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionOnCloseIfASendFailedWithDefaultExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new DefaultProductionExceptionHandler());

    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);

    try {
        collector.close();
        fail("Should have thrown StreamsException");
    } catch (final StreamsException expected) {
        /* ok */
    }
}
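For reference, the Callback contract these tests exercise is a single method, onCompletion(metadata, exception), invoked once per send with exactly one of the two arguments non-null. A minimal standalone sketch of the plain producer API follows; the bootstrap address and topic name are illustrative assumptions, not taken from the snippets above.

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // hypothetical broker address; adjust for your environment
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("topic1", "key", "value"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // exactly one of (metadata, exception) is non-null
                    if (exception != null) {
                        System.err.println("send failed: " + exception);
                    } else {
                        System.out.println("sent to " + metadata.topic() + "-" + metadata.partition());
                    }
                }
            });
            producer.flush();
        }
    }
}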
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
The class RecordCollectorTest, method shouldThrowStreamsExceptionOnAnyExceptionButProducerFencedException.
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnAnyExceptionButProducerFencedException() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            throw new KafkaException();
        }
    }, "test", logContext, new DefaultProductionExceptionHandler());

    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
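The handler passed as the last constructor argument decides what the collector does with a failed send: DefaultProductionExceptionHandler returns FAIL, while AlwaysContinueProductionExceptionHandler (used in the next test) returns CONTINUE. A custom policy implements the same interface; a minimal sketch, where the class name is illustrative and not a Kafka-provided handler:

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// illustrative handler: skip failed records instead of failing the task
public class LogAndContinueProductionExceptionHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // returning CONTINUE makes flush()/close() swallow the send error,
        // as AlwaysContinueProductionExceptionHandler does in the test below
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed for this sketch
    }
}

Such a handler would be registered through the default.production.exception.handler setting in StreamsConfig.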
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
The class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnFlushIfASendFailedWithContinueExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnFlushIfASendFailedWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new AlwaysContinueProductionExceptionHandler());

    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);

    collector.flush();
}
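Overriding send() on MockProducer, as these tests do, is one way to inject a failure; when constructed with autoComplete set to false, MockProducer can also complete pending sends with an error on demand. A minimal sketch, with topic name and serializers as illustrative choices:

import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerFailureSketch {
    public static void main(String[] args) {
        // autoComplete = false: each send() stays pending until completed by hand
        MockProducer<String, String> producer =
                new MockProducer<>(false, new StringSerializer(), new StringSerializer());

        Future<RecordMetadata> future = producer.send(
                new ProducerRecord<>("topic1", "key", "value"),
                new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        System.out.println("callback got: " + exception);
                    }
                });

        // complete the oldest pending send with an error; the callback fires now
        producer.errorNext(new RuntimeException("simulated broker failure"));
        System.out.println("future done: " + future.isDone());
    }
}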
Use of org.apache.kafka.clients.producer.Callback in project flink by apache.
The class FlinkKafkaProducerBaseTest, method testAsyncErrorRethrownOnCheckpointAfterFlush.
/**
 * Test ensuring that if an async exception is caught for one of the flushed requests on
 * checkpoint, it is rethrown; we set a timeout because the test will not finish if the
 * logic is broken.
 *
 * <p>Note that this test does not verify that the snapshot method blocks correctly when
 * there are pending records. That case is covered by testAtLeastOnceProducer.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
            FakeStandardProducerConfig.get(),
            new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
            null);
    producer.setFlushOnCheckpoint(true);

    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));
    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));

    // only let the first callback succeed for now
    producer.getPendingCallbacks().get(0).onCompletion(null, null);

    CheckedThread snapshotThread = new CheckedThread() {

        @Override
        public void go() throws Exception {
            // this should block at first, since there are still two pending records
            // that need to be flushed
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // let the 2nd message fail with an async exception
    producer.getPendingCallbacks().get(1).onCompletion(null, new Exception("artificial async failure for 2nd message"));
    producer.getPendingCallbacks().get(2).onCompletion(null, null);

    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // the snapshot should have failed with the async exception
        Assert.assertTrue(e.getCause().getMessage().contains("artificial async failure for 2nd message"));

        // test succeeded
        return;
    }

    Assert.fail();
}
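What the test pins down is the flush-on-checkpoint contract: the snapshot blocks until every pending callback has fired, then rethrows the first async error any callback recorded. A condensed sketch of that pattern; the field and method names here are illustrative, not Flink's exact internals:

// illustrative sketch of the flush-on-checkpoint pattern exercised above
public class FlushOnCheckpointSketch {

    private final Object pendingRecordsLock = new Object();
    private long pendingRecords;                 // incremented per send(), decremented per callback
    private volatile Exception asyncException;   // first error seen by a callback

    // called from the snapshot path
    void flushAndCheck() throws Exception {
        synchronized (pendingRecordsLock) {
            while (pendingRecords > 0) {
                pendingRecordsLock.wait();  // blocks, like the snapshot thread in the test
            }
        }
        Exception e = asyncException;
        if (e != null) {
            asyncException = null;
            throw new Exception("Failed to send data to Kafka: " + e.getMessage(), e);
        }
    }

    // called from each producer callback
    void onCallbackCompleted(Exception error) {
        if (error != null && asyncException == null) {
            asyncException = error;
        }
        synchronized (pendingRecordsLock) {
            pendingRecords--;
            pendingRecordsLock.notifyAll();
        }
    }
}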
Use of org.apache.kafka.clients.producer.Callback in project flink by apache.
The class FlinkKafkaProducer, method open.
// ----------------------------------- Utilities --------------------------
/**
* Initializes the connection to Kafka.
*/
@Override
public void open(Configuration configuration) throws Exception {
    if (logFailuresOnly) {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            }
        };
    } else {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && asyncException == null) {
                    asyncException = exception;
                }
                acknowledgeMessage();
            }
        };
    }

    RuntimeContext ctx = getRuntimeContext();

    if (flinkKafkaPartitioner != null) {
        flinkKafkaPartitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }

    if (kafkaSchema instanceof KafkaContextAware) {
        KafkaContextAware<IN> contextAwareSchema = (KafkaContextAware<IN>) kafkaSchema;
        contextAwareSchema.setParallelInstanceId(ctx.getIndexOfThisSubtask());
        contextAwareSchema.setNumParallelInstances(ctx.getNumberOfParallelSubtasks());
    }

    if (kafkaSchema != null) {
        kafkaSchema.open(
                RuntimeContextInitializationContextAdapters.serializationAdapter(
                        getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));
    }

    super.open(configuration);
}
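Since Callback declares a single abstract method, the two anonymous classes above can equivalently be written as lambdas. A self-contained sketch, assuming fields analogous to the snippet's logFailuresOnly, asyncException, and acknowledgeMessage():

import org.apache.kafka.clients.producer.Callback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// sketch: the same two error-handling policies as lambdas (field names mirror the snippet above)
class CallbackChoiceSketch {
    private static final Logger LOG = LoggerFactory.getLogger(CallbackChoiceSketch.class);

    private volatile Exception asyncException;

    private void acknowledgeMessage() {
        // bookkeeping for pending records would go here
    }

    Callback buildCallback(boolean logFailuresOnly) {
        if (logFailuresOnly) {
            // log-and-drop policy: failures never fail the job
            return (metadata, e) -> {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            };
        }
        // fail-fast policy: remember the first error so it can be rethrown later
        return (metadata, e) -> {
            if (e != null && asyncException == null) {
                asyncException = e;
            }
            acknowledgeMessage();
        };
    }
}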