Use of org.apache.kafka.common.errors.ApiException in project camel by apache: class KafkaProducerTest, method processAsyncSendsMessageWithException.
@Test
public void processAsyncSendsMessageWithException() throws Exception {
    endpoint.getConfiguration().setTopic("sometopic");
    Mockito.when(exchange.getIn()).thenReturn(in);
    Mockito.when(exchange.getOut()).thenReturn(out);
    // set up the mocked Kafka producer to throw when send() is called
    org.apache.kafka.clients.producer.KafkaProducer kp = producer.getKafkaProducer();
    Mockito.when(kp.send(Matchers.any(ProducerRecord.class), Matchers.any(Callback.class))).thenThrow(new ApiException());
    in.setHeader(KafkaConstants.PARTITION_KEY, 4);
    producer.process(exchange, callback);
    // capture the Kafka callback so the test can complete it manually below
    ArgumentCaptor<Callback> callBackCaptor = ArgumentCaptor.forClass(Callback.class);
    Mockito.verify(producer.getKafkaProducer()).send(Matchers.any(ProducerRecord.class), callBackCaptor.capture());
    Mockito.verify(exchange).setException(Matchers.isA(ApiException.class));
    Mockito.verify(callback).done(Matchers.eq(true));
    Callback kafkaCallback = callBackCaptor.getValue();
    kafkaCallback.onCompletion(new RecordMetadata(null, 1, 1), null);
    assertRecordMetadataExists();
}
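The captured Callback follows the standard Kafka contract: exactly one of the two arguments to onCompletion is non-null. A minimal sketch of a callback that distinguishes the two outcomes (the variable name cb and the comments are illustrative):

Callback cb = new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            // failure path: metadata is null, exception describes the error
        } else {
            // success path: metadata carries topic, partition and offset
        }
    }
};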
Use of org.apache.kafka.common.errors.ApiException in project camel by apache: class KafkaProducerTest, method processSendsMessageWithException.
@Test(expected = Exception.class)
@SuppressWarnings({ "unchecked" })
public void processSendsMessageWithException() throws Exception {
    endpoint.getConfiguration().setTopic("sometopic");
    // set up the mocked Kafka producer to throw when send() is called
    org.apache.kafka.clients.producer.KafkaProducer kp = producer.getKafkaProducer();
    Mockito.when(kp.send(Matchers.any(ProducerRecord.class))).thenThrow(new ApiException());
    Mockito.when(exchange.getIn()).thenReturn(in);
    in.setHeader(KafkaConstants.PARTITION_KEY, 4);
    producer.process(exchange);
    assertRecordMetadataExists();
}
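Note that @Test(expected = Exception.class) passes on any exception, so the final assertRecordMetadataExists() is never reached. On JUnit 4.13+, assertThrows gives a tighter assertion; a hedged alternative, assuming process rethrows the ApiException unwrapped:

// assert the specific exception type rather than any Exception
ApiException thrown = Assert.assertThrows(ApiException.class, () -> producer.process(exchange));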
Use of org.apache.kafka.common.errors.ApiException in project apache-kafka-on-k8s by banzaicloud: class KafkaProducer, method doSend.
/**
 * Asynchronously sends a record to a topic.
 */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer", cce);
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer", cce);
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        tp = new TopicPartition(record.topic(), partition);
        setReadOnly(record.headers());
        Header[] headers = record.headers().toArray();
        int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), compressionType, serializedKey, serializedValue, headers);
        ensureValidRecordSize(serializedSize);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp);
        if (transactionManager != null && transactionManager.isTransactional())
            transactionManager.maybeAddPartitionToTransaction(tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, serializedValue, headers, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
        // Handle exceptions and record the errors:
        // API exceptions are returned in the future,
        // all other exceptions are thrown directly.
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify the interceptor about all exceptions, since onSend is called before anything else in this method
        this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
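As the comment in the catch blocks notes, an ApiException is not thrown to the caller but returned as a failed future. A hedged sketch of how a synchronous caller would observe it (topic, key, and value are placeholders):

try {
    // get() blocks until the send completes or fails
    producer.send(new ProducerRecord<>("my-topic", "key", "value")).get();
} catch (ExecutionException e) {
    // the cause is the ApiException that doSend wrapped in FutureFailure
    if (e.getCause() instanceof ApiException) {
        // handle the broker- or client-side API error
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}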
Use of org.apache.kafka.common.errors.ApiException in project apache-kafka-on-k8s by banzaicloud: class KafkaAdminClient, method deleteTopics.
@Override
public DeleteTopicsResult deleteTopics(final Collection<String> topicNames, DeleteTopicsOptions options) {
    final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicNames.size());
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<Void>());
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("deleteTopics", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new DeleteTopicsRequest.Builder(new HashSet<>(topicNames), timeoutMs);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
            // Handle server responses for particular topics.
            for (Map.Entry<String, Errors> entry : response.errors().entrySet()) {
                KafkaFutureImpl<Void> future = topicFutures.get(entry.getKey());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", entry.getKey());
                } else {
                    ApiException exception = entry.getValue().exception();
                    if (exception != null) {
                        future.completeExceptionally(exception);
                    } else {
                        future.complete(null);
                    }
                }
            }
            // The server should send back a response for every topic. But do a sanity check anyway.
            for (Map.Entry<String, KafkaFutureImpl<Void>> entry : topicFutures.entrySet()) {
                KafkaFutureImpl<Void> future = entry.getValue();
                if (!future.isDone()) {
                    future.completeExceptionally(new ApiException("The server response did not contain a reference to topic " + entry.getKey()));
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNames.isEmpty()) {
        runnable.call(call, now);
    }
    return new DeleteTopicsResult(new HashMap<String, KafkaFuture<Void>>(topicFutures));
}
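A hedged usage sketch of how these per-topic futures surface an ApiException to the caller (the admin client configuration props and the topic name are placeholders):

try (AdminClient admin = AdminClient.create(props)) {
    DeleteTopicsResult result = admin.deleteTopics(Collections.singleton("my-topic"));
    try {
        // all() fails if any per-topic future failed; the ApiException arrives as the cause
        result.all().get();
    } catch (ExecutionException e) {
        // e.getCause() may be, for example, an UnknownTopicOrPartitionException
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}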
Use of org.apache.kafka.common.errors.ApiException in project apache-kafka-on-k8s by banzaicloud: class KafkaAdminClient, method createTopics.
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics, final CreateTopicsOptions options) {
    final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(newTopics.size());
    final Map<String, CreateTopicsRequest.TopicDetails> topicsMap = new HashMap<>(newTopics.size());
    for (NewTopic newTopic : newTopics) {
        if (topicNameIsUnrepresentable(newTopic.name())) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + newTopic.name() + "' cannot be represented in a request."));
            topicFutures.put(newTopic.name(), future);
        } else if (!topicFutures.containsKey(newTopic.name())) {
            topicFutures.put(newTopic.name(), new KafkaFutureImpl<Void>());
            topicsMap.put(newTopic.name(), newTopic.convertToTopicDetails());
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("createTopics", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        @Override
        public AbstractRequest.Builder createRequest(int timeoutMs) {
            return new CreateTopicsRequest.Builder(topicsMap, timeoutMs, options.shouldValidateOnly());
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse;
            // Handle server responses for particular topics.
            for (Map.Entry<String, ApiError> entry : response.errors().entrySet()) {
                KafkaFutureImpl<Void> future = topicFutures.get(entry.getKey());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", entry.getKey());
                } else {
                    ApiException exception = entry.getValue().exception();
                    if (exception != null) {
                        future.completeExceptionally(exception);
                    } else {
                        future.complete(null);
                    }
                }
            }
            // The server should send back a response for every topic. But do a sanity check anyway.
            for (Map.Entry<String, KafkaFutureImpl<Void>> entry : topicFutures.entrySet()) {
                KafkaFutureImpl<Void> future = entry.getValue();
                if (!future.isDone()) {
                    future.completeExceptionally(new ApiException("The server response did not contain a reference to topic " + entry.getKey()));
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicsMap.isEmpty()) {
        runnable.call(call, now);
    }
    return new CreateTopicsResult(new HashMap<String, KafkaFuture<Void>>(topicFutures));
}
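Similarly, a hedged sketch for createTopics (the configuration props, topic name, partition count, and replication factor are placeholders):

try (AdminClient admin = AdminClient.create(props)) {
    NewTopic topic = new NewTopic("my-topic", 3, (short) 1); // name, partitions, replication factor
    try {
        admin.createTopics(Collections.singleton(topic)).all().get();
    } catch (ExecutionException e) {
        // e.getCause() may be, for example, a TopicExistsException
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}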