Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class RecordCollectorImpl, method send:
@Override
public <K, V> void send(final String topic, K key, V value, Integer partition, Long timestamp, Serializer<K> keySerializer, Serializer<V> valueSerializer, StreamPartitioner<? super K, ? super V> partitioner) {
    checkForException();
    byte[] keyBytes = keySerializer.serialize(topic, key);
    byte[] valBytes = valueSerializer.serialize(topic, value);
    if (partition == null && partitioner != null) {
        List<PartitionInfo> partitions = this.producer.partitionsFor(topic);
        if (partitions != null && partitions.size() > 0) {
            partition = partitioner.partition(key, value, partitions.size());
        }
    }
    ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes);
    for (int attempt = 1; attempt <= MAX_SEND_ATTEMPTS; attempt++) {
        try {
            this.producer.send(serializedRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        if (sendException != null) {
                            return;
                        }
                        TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                        offsets.put(tp, metadata.offset());
                    } else {
                        sendException = exception;
                        log.error("{} Error sending record to topic {}. No more offsets will be recorded for this task and the exception will eventually be thrown", logPrefix, topic, exception);
                    }
                }
            });
            return;
        } catch (TimeoutException e) {
            if (attempt == MAX_SEND_ATTEMPTS) {
                throw new StreamsException(String.format("%s Failed to send record to topic %s after %d attempts", logPrefix, topic, attempt));
            }
            log.warn("{} Timeout exception caught when sending record to topic {} attempt {}", logPrefix, topic, attempt);
            Utils.sleep(SEND_RETRY_BACKOFF);
        }
    }
}
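The pattern is easy to lift out of Streams: each send installs a Callback that records the acknowledged offset on success and keeps only the first failure for a later rethrow. A minimal standalone sketch of the same idea follows (an illustration, not code from the Kafka source; the bootstrap address and topic name are placeholders):

import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class OffsetTrackingSend {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("key.serializer", ByteArraySerializer.class.getName());
        props.put("value.serializer", ByteArraySerializer.class.getName());
        final Map<TopicPartition, Long> offsets = new ConcurrentHashMap<>();
        final AtomicReference<Exception> sendException = new AtomicReference<>();
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("some-topic", "hello".getBytes()), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        // Keep only the first failure, as RecordCollectorImpl does.
                        sendException.compareAndSet(null, exception);
                    } else {
                        offsets.put(new TopicPartition(metadata.topic(), metadata.partition()), metadata.offset());
                    }
                }
            });
            producer.flush();
        }
        if (sendException.get() != null) {
            throw new RuntimeException("A send failed", sendException.get());
        }
        System.out.println("Acknowledged offsets: " + offsets);
    }
}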
Use of org.apache.kafka.clients.producer.Callback in project storm by apache.
The class KafkaBoltTest, method executeWithByteArrayKeyAndMessageAsync:
/* test asynchronous sending (default) */
@Test
public void executeWithByteArrayKeyAndMessageAsync() {
    boolean async = true;
    boolean fireAndForget = false;
    String keyString = "test-key";
    String messageString = "test-message";
    byte[] key = keyString.getBytes();
    byte[] message = messageString.getBytes();
    final Tuple tuple = generateTestTuple(key, message);
    final ByteBufferMessageSet mockMsg = mockSingleMessage(key, message);
    simpleConsumer.close();
    simpleConsumer = mockSimpleConsumer(mockMsg);
    KafkaProducer<?, ?> producer = mock(KafkaProducer.class);
    when(producer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(new Answer<Future>() {
        @Override
        public Future answer(InvocationOnMock invocationOnMock) throws Throwable {
            Callback cb = (Callback) invocationOnMock.getArguments()[1];
            cb.onCompletion(null, null);
            return mock(Future.class);
        }
    });
    bolt = generateDefaultSerializerBolt(async, fireAndForget, producer);
    bolt.execute(tuple);
    verify(collector).ack(tuple);
    verifyMessage(keyString, messageString);
}
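The behavior this test pins down is that in async mode the bolt acks the tuple only when the producer callback completes; the Mockito Answer above completes it immediately with no error, so the ack must follow. A simplified sketch of that send path (an illustration, not the storm-kafka source; Storm 1.x package names are assumed):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.tuple.Tuple;

// Illustration only: the async ack-from-callback pattern the test verifies.
class AsyncAckingSender {
    private final Producer<byte[], byte[]> producer;
    private final OutputCollector collector;

    AsyncAckingSender(Producer<byte[], byte[]> producer, OutputCollector collector) {
        this.producer = producer;
        this.collector = collector;
    }

    void send(final Tuple input, ProducerRecord<byte[], byte[]> record) {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                // Ack or fail the tuple only once the broker (or a mock) completes the send.
                if (exception == null) {
                    collector.ack(input);
                } else {
                    collector.fail(input);
                }
            }
        });
    }
}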
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class ProducerPerformance, method main:
public static void main(String[] args) throws Exception {
    ArgumentParser parser = argParser();
    try {
        Namespace res = parser.parseArgs(args);
        /* parse args */
        String topicName = res.getString("topic");
        long numRecords = res.getLong("numRecords");
        Integer recordSize = res.getInt("recordSize");
        int throughput = res.getInt("throughput");
        List<String> producerProps = res.getList("producerConfig");
        String producerConfig = res.getString("producerConfigFile");
        String payloadFilePath = res.getString("payloadFile");
        // The default value is printed with the help text, so "\n" is escaped there and replaced with a real newline here.
        String payloadDelimiter = res.getString("payloadDelimiter").equals("\\n") ? "\n" : res.getString("payloadDelimiter");
        if (producerProps == null && producerConfig == null) {
            throw new ArgumentParserException("Either --producer-props or --producer.config must be specified.", parser);
        }
        List<byte[]> payloadByteList = new ArrayList<>();
        if (payloadFilePath != null) {
            Path path = Paths.get(payloadFilePath);
            System.out.println("Reading payloads from: " + path.toAbsolutePath());
            if (Files.notExists(path) || Files.size(path) == 0) {
                throw new IllegalArgumentException("File does not exist or empty file provided.");
            }
            String[] payloadList = new String(Files.readAllBytes(path), StandardCharsets.UTF_8).split(payloadDelimiter);
            System.out.println("Number of messages read: " + payloadList.length);
            for (String payload : payloadList) {
                payloadByteList.add(payload.getBytes(StandardCharsets.UTF_8));
            }
        }
        Properties props = new Properties();
        if (producerConfig != null) {
            props.putAll(Utils.loadProps(producerConfig));
        }
        if (producerProps != null) {
            for (String prop : producerProps) {
                String[] pieces = prop.split("=");
                if (pieces.length != 2) {
                    throw new IllegalArgumentException("Invalid property: " + prop);
                }
                props.put(pieces[0], pieces[1]);
            }
        }
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
        /* setup perf test */
        byte[] payload = null;
        Random random = new Random(0);
        if (recordSize != null) {
            payload = new byte[recordSize];
            for (int i = 0; i < payload.length; ++i) {
                payload[i] = (byte) (random.nextInt(26) + 65);
            }
        }
        ProducerRecord<byte[], byte[]> record;
        Stats stats = new Stats(numRecords, 5000);
        long startMs = System.currentTimeMillis();
        ThroughputThrottler throttler = new ThroughputThrottler(throughput, startMs);
        for (long i = 0; i < numRecords; i++) {
            if (payloadFilePath != null) {
                payload = payloadByteList.get(random.nextInt(payloadByteList.size()));
            }
            record = new ProducerRecord<>(topicName, payload);
            long sendStartMs = System.currentTimeMillis();
            Callback cb = stats.nextCompletion(sendStartMs, payload.length, stats);
            producer.send(record, cb);
            if (throttler.shouldThrottle(i, sendStartMs)) {
                throttler.throttle();
            }
        }
        /* print final results */
        producer.close();
        stats.printTotal();
    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            Exit.exit(0);
        } else {
            parser.handleError(e);
            Exit.exit(1);
        }
    }
}
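Stats and its nextCompletion method are not shown here; nextCompletion returns the Callback that feeds each record's latency and size into the running statistics. A rough stand-in for its shape (a hypothetical class, not the one from the Kafka tools):

import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

// Hypothetical stand-in for the callback returned by Stats.nextCompletion.
class LatencyRecordingCallback implements Callback {
    private final long sendStartMs;
    private final int bytes;
    private final AtomicLong totalLatencyMs;
    private final AtomicLong totalBytes;

    LatencyRecordingCallback(long sendStartMs, int bytes, AtomicLong totalLatencyMs, AtomicLong totalBytes) {
        this.sendStartMs = sendStartMs;
        this.bytes = bytes;
        this.totalLatencyMs = totalLatencyMs;
        this.totalBytes = totalBytes;
    }

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception == null) {
            // Record how long the broker took to acknowledge this record.
            totalLatencyMs.addAndGet(System.currentTimeMillis() - sendStartMs);
            totalBytes.addAndGet(bytes);
        }
    }
}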
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class RecordCollectorTest, method shouldThrowStreamsExceptionAfterMaxAttempts:
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionAfterMaxAttempts() throws Exception {
    RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            throw new TimeoutException();
        }
    }, "test");
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
}
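Here MockProducer is subclassed only to make every send throw TimeoutException, so the retry loop in RecordCollectorImpl.send runs out of attempts. On the happy path MockProducer can be used as-is: with autoComplete set to true it acknowledges each send synchronously and keeps a history of the records it received. A minimal sketch (topic name and payload are arbitrary):

import java.util.List;

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class MockProducerExample {
    public static void main(String[] args) {
        // autoComplete = true: each send is completed synchronously with a fake RecordMetadata.
        MockProducer<byte[], byte[]> producer = new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
        producer.send(new ProducerRecord<>("topic1", "payload".getBytes()));
        // history() returns every record the mock has seen, in send order.
        List<ProducerRecord<byte[], byte[]>> sent = producer.history();
        System.out.println("records sent: " + sent.size());
    }
}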
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
The class RecordCollectorTest, method shouldThrowStreamsExceptionOnFlushIfASendFailed:
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnFlushIfASendFailed() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test");
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
}
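This test relies on flush() rethrowing the exception the callback stored in sendException, so the failure surfaces even though the send call itself returned normally. A plausible shape for that check, reusing the producer, logPrefix, and sendException fields from the RecordCollectorImpl excerpt above (a sketch; the actual implementation may differ):

// Sketch only: the deferred-error pattern the test exercises.
private void checkForException() {
    if (sendException != null) {
        throw new StreamsException(String.format("%s exception caught when producing", logPrefix), sendException);
    }
}

public void flush() {
    log.debug("{} Flushing producer", logPrefix);
    this.producer.flush();
    checkForException();
}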