Use of org.apache.kafka.common.KafkaException in project nakadi by zalando.
In class NakadiKafkaConsumerTest, method whenReadEventsThenNakadiException:
@Test
@SuppressWarnings("unchecked")
public void whenReadEventsThenNakadiException() {
    // ARRANGE //
    final ImmutableList<RuntimeException> exceptions = ImmutableList.of(
            new NoOffsetForPartitionException(new TopicPartition("", 0)),
            new KafkaException());
    int numberOfNakadiExceptions = 0;
    for (final Exception exception : exceptions) {
        final KafkaConsumer<byte[], byte[]> kafkaConsumerMock = mock(KafkaConsumer.class);
        when(kafkaConsumerMock.poll(POLL_TIMEOUT)).thenThrow(exception);
        try {
            // ACT //
            final NakadiKafkaConsumer consumer = new NakadiKafkaConsumer(
                    kafkaConsumerMock, ImmutableList.of(), createTpTimelineMap(), POLL_TIMEOUT);
            consumer.readEvents();
            // ASSERT //
            fail("An Exception was expected to be thrown");
        } catch (final Exception e) {
            numberOfNakadiExceptions++;
        }
    }
    assertThat("We should get a NakadiException for every call",
            numberOfNakadiExceptions, equalTo(exceptions.size()));
}
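The assertion depends on readEvents translating every Kafka-level RuntimeException thrown by poll into a Nakadi-level exception. A minimal sketch of that translation, assuming a NakadiException type with a (String, Throwable) constructor; the actual Nakadi implementation may differ:

    public ConsumerRecords<byte[], byte[]> readEvents() throws NakadiException {
        try {
            return kafkaConsumer.poll(pollTimeout);
        } catch (final KafkaException e) {
            // NoOffsetForPartitionException extends KafkaException, so a single
            // catch covers both of the exceptions injected by the test.
            throw new NakadiException("Failed to poll events from Kafka", e);
        }
    }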
Use of org.apache.kafka.common.KafkaException in project nakadi by zalando.
In class KafkaTopicRepositoryTest, method createKafkaFactory:
@SuppressWarnings("unchecked")
private KafkaFactory createKafkaFactory() {
    // Consumer
    final Consumer consumer = mock(Consumer.class);
    allTopics().forEach(topic -> when(consumer.partitionsFor(topic)).thenReturn(partitionsOfTopic(topic)));
    doAnswer(invocation -> {
        offsetMode = ConsumerOffsetMode.EARLIEST;
        return null;
    }).when(consumer).seekToBeginning(anyVararg());
    doAnswer(invocation -> {
        offsetMode = ConsumerOffsetMode.LATEST;
        return null;
    }).when(consumer).seekToEnd(anyVararg());
    when(consumer.position(any())).thenAnswer(invocation -> {
        final org.apache.kafka.common.TopicPartition tp =
                (org.apache.kafka.common.TopicPartition) invocation.getArguments()[0];
        return PARTITIONS.stream()
                .filter(ps -> ps.topic.equals(tp.topic()) && ps.partition == tp.partition())
                .findFirst()
                .map(ps -> offsetMode == ConsumerOffsetMode.LATEST ? ps.latestOffset : ps.earliestOffset)
                .orElseThrow(KafkaException::new);
    });
    // KafkaProducer
    when(kafkaProducer.send(EXPECTED_PRODUCER_RECORD)).thenReturn(mock(Future.class));
    // KafkaFactory
    final KafkaFactory kafkaFactory = mock(KafkaFactory.class);
    when(kafkaFactory.getConsumer(KAFKA_CLIENT_ID)).thenReturn(consumer);
    when(kafkaFactory.getConsumer()).thenReturn(consumer);
    when(kafkaFactory.takeProducer()).thenReturn(kafkaProducer);
    return kafkaFactory;
}
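Taken together, the stubs make the mock consumer behave like a tiny in-memory broker: seekToBeginning/seekToEnd flip a shared offsetMode flag, and position resolves offsets against the PARTITIONS fixture. A hypothetical test body exercising it (the topic literal is illustrative and would need to be one of allTopics()):

    final KafkaFactory factory = createKafkaFactory();
    final Consumer consumer = factory.getConsumer(KAFKA_CLIENT_ID);

    // seekToEnd flips offsetMode to LATEST via the doAnswer stub above ...
    consumer.seekToEnd(new org.apache.kafka.common.TopicPartition("my-topic", 0));

    // ... so position() now resolves to latestOffset from the PARTITIONS fixture,
    // or throws KafkaException if the partition is unknown.
    final long latest = consumer.position(new org.apache.kafka.common.TopicPartition("my-topic", 0));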
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
In class KafkaProducer, method waitOnMetadata:
/**
 * Wait for cluster metadata including partitions for the given topic to be available.
 * @param topic The topic we want metadata for
 * @param partition A specific partition expected to exist in metadata, or null if there's no preference
 * @param maxWaitMs The maximum time in ms for waiting on the metadata
 * @return The cluster containing topic metadata and the amount of time we waited in ms
 */
private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long maxWaitMs) throws InterruptedException {
    // add topic to metadata topic list if it is not there already and reset expiry
    metadata.add(topic);
    Cluster cluster = metadata.fetch();
    Integer partitionsCount = cluster.partitionCountForTopic(topic);
    // Return cached metadata if we have it, and if the record's partition is either undefined
    // or within the known partition range
    if (partitionsCount != null && (partition == null || partition < partitionsCount))
        return new ClusterAndWaitTime(cluster, 0);

    long begin = time.milliseconds();
    long remainingWaitMs = maxWaitMs;
    long elapsed;
    // Issue metadata requests until we have metadata for the topic or maxWaitMs is exceeded.
    // In case we already have cached metadata for the topic, but the requested partition is
    // greater than expected, issue an update request only once. This is necessary if the metadata
    // is stale and the number of partitions for this topic has increased in the meantime.
    do {
        log.trace("Requesting metadata update for topic {}.", topic);
        metadata.add(topic);
        int version = metadata.requestUpdate();
        sender.wakeup();
        try {
            metadata.awaitUpdate(version, remainingWaitMs);
        } catch (TimeoutException ex) {
            // Rethrow with original maxWaitMs to prevent logging exception with remainingWaitMs
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        }
        cluster = metadata.fetch();
        elapsed = time.milliseconds() - begin;
        if (elapsed >= maxWaitMs)
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        if (cluster.unauthorizedTopics().contains(topic))
            throw new TopicAuthorizationException(topic);
        remainingWaitMs = maxWaitMs - elapsed;
        partitionsCount = cluster.partitionCountForTopic(topic);
    } while (partitionsCount == null);

    if (partition != null && partition >= partitionsCount) {
        throw new KafkaException(String.format(
                "Invalid partition given with record: %d is not in the range [0...%d).",
                partition, partitionsCount));
    }
    return new ClusterAndWaitTime(cluster, elapsed);
}
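waitOnMetadata returns the cluster snapshot together with the time spent blocking, so that doSend (next snippet) can deduct that time from its overall max.block.ms budget. A minimal sketch of the holder, with field names taken from its usage in doSend below:

    // Value object pairing the metadata snapshot with the time spent waiting for it.
    private static final class ClusterAndWaitTime {
        final Cluster cluster;
        final long waitedOnMetadataMs;

        ClusterAndWaitTime(Cluster cluster, long waitedOnMetadataMs) {
            this.cluster = cluster;
            this.waitedOnMetadataMs = waitedOnMetadataMs;
        }
    }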
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
In class KafkaProducer, method doSend:
/**
 * Implementation of asynchronously sending a record to a topic.
 */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() +
                    " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() +
                    " specified in key.serializer", cce);
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() +
                    " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
                    " specified in value.serializer", cce);
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        tp = new TopicPartition(record.topic(), partition);
        setReadOnly(record.headers());
        Header[] headers = record.headers().toArray();
        int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(),
                compressionType, serializedKey, serializedValue, headers);
        ensureValidRecordSize(serializedSize);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp);
        if (transactionManager != null && transactionManager.isTransactional())
            transactionManager.maybeAddPartitionToTransaction(tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey,
                serializedValue, headers, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
        // handling exceptions and record the errors;
        // for API exceptions return them in the future,
        // for other exceptions throw directly
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify interceptor about all exceptions, since onSend is called before anything else in this method
        this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
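The catch cascade above defines the producer's error contract: ApiExceptions are reported through the returned future (and the callback), while other KafkaExceptions and InterruptedException propagate from send itself. A minimal caller sketch showing both paths; the broker address and topic name are illustrative:

    import java.util.Properties;
    import java.util.concurrent.ExecutionException;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.serialization.ByteArraySerializer;

    public class SendContractExample {
        public static void main(String[] args) throws InterruptedException {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

            try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
                ProducerRecord<byte[], byte[]> record =
                        new ProducerRecord<>("example-topic", "hello".getBytes());
                try {
                    // ApiExceptions (e.g. TopicAuthorizationException) are returned as a
                    // FutureFailure by doSend, so they surface here via ExecutionException.
                    producer.send(record).get();
                } catch (ExecutionException e) {
                    System.err.println("API error reported through the future: " + e.getCause());
                }
            } catch (KafkaException e) {
                // Non-API KafkaExceptions (e.g. serialization or buffer exhaustion) are
                // rethrown directly from send(), so they land here instead.
                System.err.println("Error thrown directly from send(): " + e);
            }
        }
    }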
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
In class Sender, method maybeSendTransactionalRequest:
private boolean maybeSendTransactionalRequest(long now) {
    if (transactionManager.isCompleting() && accumulator.hasIncomplete()) {
        if (transactionManager.isAborting())
            accumulator.abortUndrainedBatches(new KafkaException("Failing batch since transaction was aborted"));
        // There may still be requests left which are being retried. Since we do not know whether they
        // were successfully appended to the broker log, we must resend them until their final status is
        // clear; otherwise our sequence numbers would not be correct, which would lead to an
        // OutOfSequenceException.
        if (!accumulator.flushInProgress())
            accumulator.beginFlush();
    }
    TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequestHandler(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;
    AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder();
    while (running) {
        Node targetNode = null;
        try {
            if (nextRequestHandler.needsCoordinator()) {
                targetNode = transactionManager.coordinator(nextRequestHandler.coordinatorType());
                if (targetNode == null) {
                    transactionManager.lookupCoordinator(nextRequestHandler);
                    break;
                }
                if (!NetworkClientUtils.awaitReady(client, targetNode, time, requestTimeout)) {
                    transactionManager.lookupCoordinator(nextRequestHandler);
                    break;
                }
            } else {
                targetNode = awaitLeastLoadedNodeReady(requestTimeout);
            }
            if (targetNode != null) {
                if (nextRequestHandler.isRetry())
                    time.sleep(nextRequestHandler.retryBackoffMs());
                ClientRequest clientRequest = client.newClientRequest(targetNode.idString(), requestBuilder, now, true, nextRequestHandler);
                transactionManager.setInFlightTransactionalRequestCorrelationId(clientRequest.correlationId());
                log.debug("Sending transactional request {} to node {}", requestBuilder, targetNode);
                client.send(clientRequest, now);
                return true;
            }
        } catch (IOException e) {
            log.debug("Disconnect from {} while trying to send request {}. Going to back off and retry", targetNode, requestBuilder);
            if (nextRequestHandler.needsCoordinator()) {
                // We break here so that we pick up the FindCoordinator request immediately.
                transactionManager.lookupCoordinator(nextRequestHandler);
                break;
            }
        }
        time.sleep(retryBackoffMs);
        metadata.requestUpdate();
    }
    transactionManager.retry(nextRequestHandler);
    return true;
}
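This request loop is driven by the producer's transactional API. The sketch below shows the calls that reach it (transactional id, broker address, and topic are illustrative): commitTransaction puts the TransactionManager into the completing state checked at the top of the method, and abortTransaction triggers the abortUndrainedBatches path with the KafkaException shown above.

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.errors.AuthorizationException;
    import org.apache.kafka.common.errors.OutOfOrderSequenceException;
    import org.apache.kafka.common.errors.ProducerFencedException;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class TransactionalSendExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id");

            KafkaProducer<String, String> producer = new KafkaProducer<>(props);
            // FindCoordinator and InitProducerId requests flow through the Sender loop above.
            producer.initTransactions();
            try {
                producer.beginTransaction();
                producer.send(new ProducerRecord<>("example-topic", "key", "value"));
                // isCompleting() becomes true here; the Sender flushes incomplete
                // batches before it sends the EndTxn request.
                producer.commitTransaction();
            } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
                // Fatal errors: the producer must be closed, not aborted.
                producer.close();
            } catch (KafkaException e) {
                // Recoverable error: this drives the abortUndrainedBatches path seen above.
                producer.abortTransaction();
            }
            producer.close();
        }
    }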