Use of org.apache.kafka.clients.producer.Callback in project flink by apache.
The class FlinkKafkaProducerBase, method open().
// ----------------------------------- Utilities --------------------------

/**
 * Initializes the connection to Kafka.
 */
@Override
public void open(Configuration configuration) throws Exception {
    if (schema instanceof KeyedSerializationSchemaWrapper) {
        ((KeyedSerializationSchemaWrapper<IN>) schema)
                .getSerializationSchema()
                .open(
                        RuntimeContextInitializationContextAdapters.serializationAdapter(
                                getRuntimeContext(),
                                metricGroup -> metricGroup.addGroup("user")));
    }
    producer = getKafkaProducer(this.producerConfig);
    RuntimeContext ctx = getRuntimeContext();
    if (null != flinkKafkaPartitioner) {
        flinkKafkaPartitioner.open(
                ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }
    LOG.info(
            "Starting FlinkKafkaProducer ({}/{}) to produce into default topic {}",
            ctx.getIndexOfThisSubtask() + 1,
            ctx.getNumberOfParallelSubtasks(),
            defaultTopicId);
    // register Kafka metrics to Flink accumulators
    if (!Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup =
                    getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                kafkaMetricGroup.gauge(
                        metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
            }
        }
    }
    if (flushOnCheckpoint
            && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }
    if (logFailuresOnly) {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            }
        };
    } else {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && asyncException == null) {
                    asyncException = exception;
                }
                acknowledgeMessage();
            }
        };
    }
}
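Since Callback declares a single method, both anonymous classes above can also be written as lambdas. A minimal equivalent sketch, using the same fields and helpers (callback, logFailuresOnly, LOG, asyncException, acknowledgeMessage) as the snippet:

// same behavior as the two anonymous classes above, written as lambdas
if (logFailuresOnly) {
    callback = (metadata, e) -> {
        if (e != null) {
            LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
        }
        acknowledgeMessage();
    };
} else {
    callback = (metadata, exception) -> {
        // remember only the first asynchronous failure; later ones are dropped
        if (exception != null && asyncException == null) {
            asyncException = exception;
        }
        acknowledgeMessage();
    };
}

Either form runs on the producer's background I/O thread, so the callback body should stay cheap and must not block.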
Use of org.apache.kafka.clients.producer.Callback in project flink by apache.
The class KafkaTestBase, method produceToKafka().
public static <K, V> void produceToKafka(
        Collection<ProducerRecord<K, V>> records,
        Class<? extends org.apache.kafka.common.serialization.Serializer<K>> keySerializerClass,
        Class<? extends org.apache.kafka.common.serialization.Serializer<V>> valueSerializerClass)
        throws Throwable {
    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(kafkaServer.getIdempotentProducerConfig());
    props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass.getName());
    props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass.getName());
    AtomicReference<Throwable> sendingError = new AtomicReference<>();
    Callback callback = (metadata, exception) -> {
        if (exception != null) {
            if (!sendingError.compareAndSet(null, exception)) {
                sendingError.get().addSuppressed(exception);
            }
        }
    };
    try (KafkaProducer<K, V> producer = new KafkaProducer<>(props)) {
        for (ProducerRecord<K, V> record : records) {
            producer.send(record, callback);
        }
    }
    if (sendingError.get() != null) {
        throw sendingError.get();
    }
}
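Two details make this helper reliable: the callback keeps the first failure and attaches later ones as suppressed exceptions, and closing the producer via try-with-resources flushes all pending sends, so every callback has fired by the time sendingError is checked. A hypothetical call site (topic name, record contents, and serializer choice are illustrative, not from the snippet):

List<ProducerRecord<byte[], byte[]>> records = new ArrayList<>();
records.add(new ProducerRecord<>("test-topic", "key".getBytes(), "value".getBytes()));
produceToKafka(records, ByteArraySerializer.class, ByteArraySerializer.class);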
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class SenderTest, method testAppendInExpiryCallback().
@Test
public void testAppendInExpiryCallback() throws InterruptedException {
    int messagesPerBatch = 10;
    final AtomicInteger expiryCallbackCount = new AtomicInteger(0);
    final AtomicReference<Exception> unexpectedException = new AtomicReference<>();
    final byte[] key = "key".getBytes();
    final byte[] value = "value".getBytes();
    final long maxBlockTimeMs = 1000;
    Callback callback = (metadata, exception) -> {
        if (exception instanceof TimeoutException) {
            expiryCallbackCount.incrementAndGet();
            try {
                accumulator.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
            } catch (InterruptedException e) {
                throw new RuntimeException("Unexpected interruption", e);
            }
        } else if (exception != null) {
            unexpectedException.compareAndSet(null, exception);
        }
    };
    final long nowMs = time.milliseconds();
    for (int i = 0; i < messagesPerBatch; i++) {
        accumulator.append(tp1, 0L, key, value, null, callback, maxBlockTimeMs, false, nowMs);
    }
    // Advance the clock to expire the first batch.
    time.sleep(10000);
    Node clusterNode = metadata.fetch().nodes().get(0);
    Map<Integer, List<ProducerBatch>> drainedBatches =
            accumulator.drain(metadata.fetch(), Collections.singleton(clusterNode), Integer.MAX_VALUE, time.milliseconds());
    sender.addToInflightBatches(drainedBatches);
    // Disconnect the target node for the pending produce request. This ensures that the sender
    // will try to expire the batch.
    client.disconnect(clusterNode.idString());
    client.backoff(clusterNode, 100);
    // We should try to flush the batch, but we expire it instead without sending anything.
    sender.runOnce();
    assertEquals(messagesPerBatch, expiryCallbackCount.get(), "Callbacks not invoked for expiry");
    assertNull(unexpectedException.get(), "Unexpected exception");
    // Make sure that the records were appended back to the batch.
    assertTrue(accumulator.batches().containsKey(tp1));
    assertEquals(1, accumulator.batches().get(tp1).size());
    assertEquals(messagesPerBatch, accumulator.batches().get(tp1).peekFirst().recordCount);
}
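The test drives the resend-on-expiry pattern through internal classes (RecordAccumulator, Sender). The same idea expressed against the public producer API would look roughly like the sketch below, where producer, record, and LOG are assumed to exist; an anonymous class is used rather than a lambda because the callback must refer to itself via this:

Callback resendOnTimeout = new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception instanceof TimeoutException) {
            // the batch expired before reaching the broker; re-enqueue the record
            // (unbounded here; a real implementation would cap the retry count)
            producer.send(record, this);
        } else if (exception != null) {
            LOG.error("Send failed", exception);
        }
    }
};
producer.send(record, resendOnTimeout);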
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class ProducerBatchTest, method testBatchAbort().
@Test
public void testBatchAbort() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    KafkaException exception = new KafkaException();
    batch.abort(exception);
    assertTrue(future.isDone());
    assertEquals(1, callback.invocations);
    assertEquals(exception, callback.exception);
    assertNull(callback.metadata);
    // subsequent completion should be ignored
    assertFalse(batch.complete(500L, 2342342341L));
    assertFalse(batch.completeExceptionally(new KafkaException(), index -> new KafkaException()));
    assertEquals(1, callback.invocations);
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have thrown");
    } catch (ExecutionException e) {
        assertEquals(exception, e.getCause());
    }
}
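MockCallback is referenced but not defined in the snippet; a plausible minimal implementation, inferred from the three fields the assertions read:

private static class MockCallback implements Callback {
    int invocations = 0;
    RecordMetadata metadata;
    Exception exception;

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        invocations++;
        this.metadata = metadata;
        this.exception = exception;
    }
}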
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class ProducerPerformance, method start().
void start(String[] args) throws IOException {
    ArgumentParser parser = argParser();
    try {
        Namespace res = parser.parseArgs(args);
        /* parse args */
        String topicName = res.getString("topic");
        long numRecords = res.getLong("numRecords");
        Integer recordSize = res.getInt("recordSize");
        int throughput = res.getInt("throughput");
        List<String> producerProps = res.getList("producerConfig");
        String producerConfig = res.getString("producerConfigFile");
        String payloadFilePath = res.getString("payloadFile");
        String transactionalId = res.getString("transactionalId");
        boolean shouldPrintMetrics = res.getBoolean("printMetrics");
        long transactionDurationMs = res.getLong("transactionDurationMs");
        boolean transactionsEnabled = 0 < transactionDurationMs;
        // since the default value gets printed with the help text, \n is escaped there
        // and replaced with the correct value here.
        String payloadDelimiter = res.getString("payloadDelimiter").equals("\\n")
                ? "\n"
                : res.getString("payloadDelimiter");
        if (producerProps == null && producerConfig == null) {
            throw new ArgumentParserException(
                    "Either --producer-props or --producer.config must be specified.", parser);
        }
        List<byte[]> payloadByteList = readPayloadFile(payloadFilePath, payloadDelimiter);
        Properties props = readProps(producerProps, producerConfig, transactionalId, transactionsEnabled);
        KafkaProducer<byte[], byte[]> producer = createKafkaProducer(props);
        if (transactionsEnabled)
            producer.initTransactions();
        /* setup perf test */
        byte[] payload = null;
        if (recordSize != null) {
            payload = new byte[recordSize];
        }
        Random random = new Random(0);
        ProducerRecord<byte[], byte[]> record;
        Stats stats = new Stats(numRecords, 5000);
        long startMs = System.currentTimeMillis();
        ThroughputThrottler throttler = new ThroughputThrottler(throughput, startMs);
        int currentTransactionSize = 0;
        long transactionStartTime = 0;
        for (long i = 0; i < numRecords; i++) {
            payload = generateRandomPayload(recordSize, payloadByteList, payload, random);
            if (transactionsEnabled && currentTransactionSize == 0) {
                producer.beginTransaction();
                transactionStartTime = System.currentTimeMillis();
            }
            record = new ProducerRecord<>(topicName, payload);
            long sendStartMs = System.currentTimeMillis();
            Callback cb = stats.nextCompletion(sendStartMs, payload.length, stats);
            producer.send(record, cb);
            currentTransactionSize++;
            if (transactionsEnabled && transactionDurationMs <= (sendStartMs - transactionStartTime)) {
                producer.commitTransaction();
                currentTransactionSize = 0;
            }
            if (throttler.shouldThrottle(i, sendStartMs)) {
                throttler.throttle();
            }
        }
        if (transactionsEnabled && currentTransactionSize != 0)
            producer.commitTransaction();
        if (!shouldPrintMetrics) {
            producer.close();
            /* print final results */
            stats.printTotal();
        } else {
            // Make sure all messages are sent before printing out the stats and the metrics.
            // We need to do this in a different branch for now since tests/kafkatest/sanity_checks/test_performance_services.py
            // expects this class to work with older versions of the client jar that don't support flush().
            producer.flush();
            /* print final results */
            stats.printTotal();
            /* print out metrics */
            ToolsUtils.printMetrics(producer.metrics());
            producer.close();
        }
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            Exit.exit(0);
        } else {
            parser.handleError(e);
            Exit.exit(1);
        }
    }
}
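Stripped of argument parsing, throttling, and stats, the time-based transaction batching in the loop above reduces to the following skeleton (a sketch; producer, records, and transactionDurationMs are assumed to be set up as in the snippet):

producer.initTransactions();
int currentTransactionSize = 0;
long transactionStartTime = 0;
for (ProducerRecord<byte[], byte[]> rec : records) {
    if (currentTransactionSize == 0) {
        producer.beginTransaction();
        transactionStartTime = System.currentTimeMillis();
    }
    producer.send(rec);
    currentTransactionSize++;
    // commit once the transaction has been open for at least transactionDurationMs
    if (System.currentTimeMillis() - transactionStartTime >= transactionDurationMs) {
        producer.commitTransaction();
        currentTransactionSize = 0;
    }
}
// commit whatever is left in the final, partially filled transaction
if (currentTransactionSize != 0) {
    producer.commitTransaction();
}

Committing by elapsed time rather than by record count keeps transaction duration roughly constant regardless of throughput, which is what the transactionDurationMs option is measuring.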