Use of org.apache.kafka.clients.producer.Callback in the Apache Storm project:
the process method of the KafkaBolt class.
@Override
protected void process(final Tuple input) {
    K key = null;
    V message = null;
    String topic = null;
    try {
        key = mapper.getKeyFromTuple(input);
        message = mapper.getMessageFromTuple(input);
        topic = topicSelector.getTopic(input);
        if (topic != null) {
            // Choose the producer callback: in async (non fire-and-forget) mode an
            // internal callback acks/fails the tuple on delivery; otherwise fall back
            // to any caller-provided callback.
            Callback callback = null;
            if (!fireAndForget && async) {
                callback = createProducerCallback(input);
            } else if (providedCallback != null) {
                callback = providedCallback;
            }
            Future<RecordMetadata> result = producer.send(new ProducerRecord<>(topic, key, message), callback);
            if (!async) {
                // Synchronous mode: block until the broker confirms or rejects the record.
                try {
                    result.get();
                    collector.ack(input);
                } catch (ExecutionException err) {
                    collector.reportError(err);
                    collector.fail(input);
                } catch (InterruptedException err) {
                    // Restore the interrupt status so callers up the stack can observe it,
                    // then fail the tuple so it can be replayed.
                    Thread.currentThread().interrupt();
                    collector.reportError(err);
                    collector.fail(input);
                }
            } else if (fireAndForget) {
                // Fire-and-forget: ack immediately without waiting for delivery confirmation.
                collector.ack(input);
            }
        } else {
            // A null topic means "skip this tuple"; ack it so it is not replayed.
            LOG.warn("skipping key = {}, topic selector returned null.", key);
            collector.ack(input);
        }
    } catch (Exception ex) {
        collector.reportError(ex);
        collector.fail(input);
    }
}
Use of org.apache.kafka.clients.producer.Callback in the Apache Kafka project:
the putTopicStateShouldOverridePreviousState method of the KafkaStatusBackingStoreFormatTest class.
@Test
public void putTopicStateShouldOverridePreviousState() {
    // Keys under which the two statuses are stored in the status topic.
    String firstKey = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    String secondKey = TOPIC_STATUS_PREFIX + BAR_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    TopicStatus firstTopicStatus = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    time.sleep(1000);
    TopicStatus secondTopicStatus = new TopicStatus(BAR_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());

    ArgumentCaptor<byte[]> serialized = ArgumentCaptor.forClass(byte[].class);
    doAnswer(invocation -> {
        // Complete the send successfully and immediately feed the written record
        // back into the store, as if it had been read from the status topic.
        Callback sendCallback = invocation.getArgument(2);
        sendCallback.onCompletion(null, null);
        store.read(new ConsumerRecord<>(STATUS_TOPIC, 0, 0, secondKey, serialized.getValue()));
        return null;
    }).when(kafkaBasedLog).send(eq(secondKey), serialized.capture(), any(Callback.class));

    // Seed the store with the first status via a simulated log read, then publish the second.
    byte[] firstValue = store.serializeTopicStatus(firstTopicStatus);
    store.read(new ConsumerRecord<>(STATUS_TOPIC, 0, 0, firstKey, firstValue));
    store.put(secondTopicStatus);

    // The captured payload round-trips to the second status, and both statuses are visible.
    assertEquals(secondTopicStatus, store.parseTopicStatus(serialized.getValue()));
    assertEquals(firstTopicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    assertEquals(secondTopicStatus, store.getTopic(FOO_CONNECTOR, BAR_TOPIC));
    assertEquals(new HashSet<>(Arrays.asList(firstTopicStatus, secondTopicStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
}
Use of org.apache.kafka.clients.producer.Callback in the Apache Kafka project:
the putTopicState method of the KafkaStatusBackingStoreFormatTest class.
@Test
public void putTopicState() {
    String key = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    TopicStatus topicStatus = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());

    ArgumentCaptor<byte[]> serialized = ArgumentCaptor.forClass(byte[].class);
    doAnswer(invocation -> {
        // Complete the send successfully without feeding the record back yet.
        Callback sendCallback = invocation.getArgument(2);
        sendCallback.onCompletion(null, null);
        return null;
    }).when(kafkaBasedLog).send(eq(key), serialized.capture(), any(Callback.class));

    store.put(topicStatus);

    // The captured payload round-trips to the original status...
    assertEquals(topicStatus, store.parseTopicStatus(serialized.getValue()));
    // ...but the store does not expose it until the record is read back from the log.
    assertNull(store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    store.read(new ConsumerRecord<>(STATUS_TOPIC, 0, 0, key, serialized.getValue()));
    assertEquals(topicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    assertEquals(Collections.singleton(topicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
}
Use of org.apache.kafka.clients.producer.Callback in the Apache Kafka project:
the putTopicStateNonRetriableFailure method of the KafkaStatusBackingStoreFormatTest class.
@Test
public void putTopicStateNonRetriableFailure() {
    String key = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    TopicStatus topicStatus = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());

    ArgumentCaptor<byte[]> serialized = ArgumentCaptor.forClass(byte[].class);
    doAnswer(invocation -> {
        // Fail the send with a non-retriable error.
        Callback sendCallback = invocation.getArgument(2);
        sendCallback.onCompletion(null, new UnknownServerException());
        return null;
    }).when(kafkaBasedLog).send(eq(key), serialized.capture(), any(Callback.class));

    // The error is logged and ignored by the store.
    store.put(topicStatus);

    // The payload was still serialized and captured...
    assertEquals(topicStatus, store.parseTopicStatus(serialized.getValue()));
    // ...but the failed write is never reflected in the store's state.
    assertNull(store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
}
Use of org.apache.kafka.clients.producer.Callback in the Apache Kafka project:
the publishMessage method of the ProducerManager class.
/**
 * Publishes the given {@code remoteLogMetadata} to the remote log metadata topic.
 *
 * @param remoteLogMetadata RemoteLogMetadata to be published
 * @return a {@link CompletableFuture} that completes with the {@link RecordMetadata} of the
 *         produced record once publishing is considered complete, or completes exceptionally
 *         if serialization or the send fails
 * @throws KafkaException if the chosen metadata partition is outside the configured
 *         partition count (should never happen while the partition count is stable)
 */
public CompletableFuture<RecordMetadata> publishMessage(RemoteLogMetadata remoteLogMetadata) {
    CompletableFuture<RecordMetadata> future = new CompletableFuture<>();
    TopicIdPartition topicIdPartition = remoteLogMetadata.topicIdPartition();
    int metadataPartitionNum = topicPartitioner.metadataPartition(topicIdPartition);
    log.debug("Publishing metadata message of partition:[{}] into metadata topic partition:[{}] with payload: [{}]", topicIdPartition, metadataPartitionNum, remoteLogMetadata);
    if (metadataPartitionNum >= rlmmConfig.metadataTopicPartitionsCount()) {
        // This should never occur as long as metadata partitions always remain the same.
        throw new KafkaException("Chosen partition no " + metadataPartitionNum + " must be less than the partition count: " + rlmmConfig.metadataTopicPartitionsCount());
    }
    try {
        // Bridge the producer's callback-style completion into the returned future.
        // Callback is a functional interface, so a lambda replaces the anonymous class.
        Callback callback = (metadata, exception) -> {
            if (exception != null) {
                future.completeExceptionally(exception);
            } else {
                future.complete(metadata);
            }
        };
        producer.send(new ProducerRecord<>(rlmmConfig.remoteLogMetadataTopicName(), metadataPartitionNum, null, serde.serialize(remoteLogMetadata)), callback);
    } catch (Exception ex) {
        // serialize() or send() may throw synchronously; surface that through the future
        // rather than letting it escape to the caller.
        future.completeExceptionally(ex);
    }
    return future;
}
Aggregations