Use of org.apache.kafka.common.errors.RetriableException in project kafka by apache.
The class ConsumerCoordinator, method doAutoCommitOffsetsAsync.
private void doAutoCommitOffsetsAsync() {
    Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
    log.debug("Sending asynchronous auto-commit of offsets {} for group {}", allConsumedOffsets, groupId);
    commitOffsetsAsync(allConsumedOffsets, new OffsetCommitCallback() {
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            if (exception != null) {
                log.warn("Auto-commit of offsets {} failed for group {}: {}", offsets, groupId, exception.getMessage());
                if (exception instanceof RetriableException)
                    nextAutoCommitDeadline = Math.min(time.milliseconds() + retryBackoffMs, nextAutoCommitDeadline);
            } else {
                log.debug("Completed auto-commit of offsets {} for group {}", offsets, groupId);
            }
        }
    });
}
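The coordinator only shortens the next auto-commit deadline when the failure is retriable; any other exception is just logged and left to the next scheduled commit. The same retriable-vs-fatal split can be applied in application code through the public KafkaConsumer.commitAsync callback. A minimal sketch, assuming a consumer and a retryBackoffMs value that exist in the surrounding application (both hypothetical here):

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RetriableException;

public class CommitExample {
    // Sketch: commit asynchronously and only plan a retry for retriable failures.
    static void commitWithRetryHint(KafkaConsumer<String, String> consumer, long retryBackoffMs) {
        consumer.commitAsync((Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) -> {
            if (exception == null) {
                return; // commit succeeded
            }
            if (exception instanceof RetriableException) {
                // Transient broker-side condition (e.g. coordinator loading): worth retrying after a backoff.
                System.err.println("Retriable commit failure, retrying in " + retryBackoffMs + " ms: " + exception);
            } else {
                // Non-retriable: retrying will not help, surface it to the application.
                System.err.println("Fatal commit failure: " + exception);
            }
        });
    }
}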
Use of org.apache.kafka.common.errors.RetriableException in project ksql by confluentinc.
The class KafkaTopicClientImpl, method executeWithRetries.
private static <T> T executeWithRetries(final Supplier<KafkaFuture<T>> supplier) throws Exception {
    int retries = 0;
    Exception lastException = null;
    while (retries < NUM_RETRIES) {
        try {
            if (retries != 0) {
                Thread.sleep(RETRY_BACKOFF_MS);
            }
            return supplier.get().get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof RetriableException) {
                retries++;
                log.info("Retrying admin request due to retriable exception. Retry no: " + retries, e);
                lastException = e;
            } else if (e.getCause() instanceof Exception) {
                throw (Exception) e.getCause();
            } else {
                throw e;
            }
        }
    }
    throw lastException;
}
Use of org.apache.kafka.common.errors.RetriableException in project apache-kafka-on-k8s by banzaicloud.
The class WorkerSourceTask, method sendRecords.
/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(),
                ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Track the record as outstanding (or in the backlog while a flush is in progress) and record its
        // source offset before sending, so the producer callback can safely complete outstanding
        // messages and update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this,
                                recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
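The key error-handling decision is the split between the two catch blocks: a synchronous RetriableException from producer.send keeps the unsent tail of the batch so it can be retried after a backoff, while any other KafkaException is treated as fatal. A stripped-down sketch of that split outside of Connect, assuming a pre-built list of records (the sendBatch name and list handling are illustrative, not Connect's API):

import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RetriableException;

// Sketch: send a batch, returning the unsent remainder when the failure is retriable.
static List<ProducerRecord<byte[], byte[]>> sendBatch(KafkaProducer<byte[], byte[]> producer,
                                                      List<ProducerRecord<byte[], byte[]>> records) {
    for (int i = 0; i < records.size(); i++) {
        try {
            producer.send(records.get(i));            // callback omitted for brevity
        } catch (RetriableException e) {
            // Transient: hand the unsent tail back to the caller to back off and retry.
            return records.subList(i, records.size());
        } catch (KafkaException e) {
            // Anything else is unrecoverable for this batch.
            throw new IllegalStateException("Unrecoverable exception trying to send", e);
        }
    }
    return Collections.emptyList();                   // everything was handed to the producer
}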
Use of org.apache.kafka.common.errors.RetriableException in project apache-kafka-on-k8s by banzaicloud.
The class Fetcher, method resetOffsetsAsync.
private void resetOffsetsAsync(Map<TopicPartition, Long> partitionResetTimestamps) {
    // Add the topics to the metadata to do a single metadata fetch.
    for (TopicPartition tp : partitionResetTimestamps.keySet())
        metadata.add(tp.topic());
    Map<Node, Map<TopicPartition, Long>> timestampsToSearchByNode = groupListOffsetRequests(partitionResetTimestamps);
    for (Map.Entry<Node, Map<TopicPartition, Long>> entry : timestampsToSearchByNode.entrySet()) {
        Node node = entry.getKey();
        final Map<TopicPartition, Long> resetTimestamps = entry.getValue();
        subscriptions.setResetPending(resetTimestamps.keySet(), time.milliseconds() + requestTimeoutMs);
        RequestFuture<ListOffsetResult> future = sendListOffsetRequest(node, resetTimestamps, false);
        future.addListener(new RequestFutureListener<ListOffsetResult>() {
            @Override
            public void onSuccess(ListOffsetResult result) {
                if (!result.partitionsToRetry.isEmpty()) {
                    subscriptions.resetFailed(result.partitionsToRetry, time.milliseconds() + retryBackoffMs);
                    metadata.requestUpdate();
                }
                for (Map.Entry<TopicPartition, OffsetData> fetchedOffset : result.fetchedOffsets.entrySet()) {
                    TopicPartition partition = fetchedOffset.getKey();
                    OffsetData offsetData = fetchedOffset.getValue();
                    Long requestedResetTimestamp = resetTimestamps.get(partition);
                    resetOffsetIfNeeded(partition, requestedResetTimestamp, offsetData);
                }
            }

            @Override
            public void onFailure(RuntimeException e) {
                subscriptions.resetFailed(resetTimestamps.keySet(), time.milliseconds() + retryBackoffMs);
                metadata.requestUpdate();
                if (!(e instanceof RetriableException) && !cachedListOffsetsException.compareAndSet(null, e))
                    log.error("Discarding error in ListOffsetResponse because another error is pending", e);
            }
        });
    }
}
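On failure the listener always marks the partitions for a delayed retry, but only a non-retriable exception is cached (via compareAndSet, so only the first one wins) to be rethrown to the caller later; retriable failures are absorbed because the reset is simply re-attempted after the backoff. The same idiom in isolation, with illustrative names rather than Fetcher's internals:

import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.common.errors.RetriableException;

// Sketch: remember only the first non-retriable failure, drop retriable ones.
final AtomicReference<RuntimeException> cachedException = new AtomicReference<>();

void onAsyncFailure(RuntimeException e) {
    if (e instanceof RetriableException) {
        return; // transient: the operation will be retried after the backoff anyway
    }
    if (!cachedException.compareAndSet(null, e)) {
        // Another fatal error is already pending for the caller; discard this one.
        System.err.println("Discarding error because another error is pending: " + e);
    }
}

// Later, on the caller's thread (e.g. at the start of the next poll):
void maybeThrowCachedException() {
    RuntimeException e = cachedException.getAndSet(null);
    if (e != null) {
        throw e;
    }
}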
Use of org.apache.kafka.common.errors.RetriableException in project storm by apache.
The class KafkaOffsetMetric, method getValueAndReset.
@Override
public Object getValueAndReset() {
    Map<TopicPartition, OffsetManager> offsetManagers = offsetManagerSupplier.get();
    Consumer<K, V> consumer = consumerSupplier.get();
    if (offsetManagers == null || offsetManagers.isEmpty() || consumer == null) {
        LOG.debug("Metrics Tick: offsetManagers or kafkaConsumer is null.");
        return null;
    }
    Map<String, TopicMetrics> topicMetricsMap = new HashMap<>();
    Set<TopicPartition> topicPartitions = offsetManagers.keySet();
    Map<TopicPartition, Long> beginningOffsets;
    Map<TopicPartition, Long> endOffsets;
    try {
        beginningOffsets = consumer.beginningOffsets(topicPartitions);
        endOffsets = consumer.endOffsets(topicPartitions);
    } catch (RetriableException e) {
        LOG.warn("Failed to get offsets from Kafka! Will retry on next metrics tick.", e);
        return null;
    }
    // map to hold partition level and topic level metrics
    Map<String, Long> result = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetManager> entry : offsetManagers.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        OffsetManager offsetManager = entry.getValue();
        long latestTimeOffset = endOffsets.get(topicPartition);
        long earliestTimeOffset = beginningOffsets.get(topicPartition);
        long latestEmittedOffset = offsetManager.getLatestEmittedOffset();
        long latestCompletedOffset = offsetManager.getCommittedOffset();
        long spoutLag = latestTimeOffset - latestCompletedOffset;
        long recordsInPartition = latestTimeOffset - earliestTimeOffset;
        String metricPath = topicPartition.topic() + "/partition_" + topicPartition.partition();
        result.put(metricPath + "/" + "spoutLag", spoutLag);
        result.put(metricPath + "/" + "earliestTimeOffset", earliestTimeOffset);
        result.put(metricPath + "/" + "latestTimeOffset", latestTimeOffset);
        result.put(metricPath + "/" + "latestEmittedOffset", latestEmittedOffset);
        result.put(metricPath + "/" + "latestCompletedOffset", latestCompletedOffset);
        result.put(metricPath + "/" + "recordsInPartition", recordsInPartition);
        TopicMetrics topicMetrics = topicMetricsMap.get(topicPartition.topic());
        if (topicMetrics == null) {
            topicMetrics = new TopicMetrics();
            topicMetricsMap.put(topicPartition.topic(), topicMetrics);
        }
        topicMetrics.totalSpoutLag += spoutLag;
        topicMetrics.totalEarliestTimeOffset += earliestTimeOffset;
        topicMetrics.totalLatestTimeOffset += latestTimeOffset;
        topicMetrics.totalLatestEmittedOffset += latestEmittedOffset;
        topicMetrics.totalLatestCompletedOffset += latestCompletedOffset;
        topicMetrics.totalRecordsInPartitions += recordsInPartition;
    }
    for (Map.Entry<String, TopicMetrics> e : topicMetricsMap.entrySet()) {
        String topic = e.getKey();
        TopicMetrics topicMetrics = e.getValue();
        result.put(topic + "/" + "totalSpoutLag", topicMetrics.totalSpoutLag);
        result.put(topic + "/" + "totalEarliestTimeOffset", topicMetrics.totalEarliestTimeOffset);
        result.put(topic + "/" + "totalLatestTimeOffset", topicMetrics.totalLatestTimeOffset);
        result.put(topic + "/" + "totalLatestEmittedOffset", topicMetrics.totalLatestEmittedOffset);
        result.put(topic + "/" + "totalLatestCompletedOffset", topicMetrics.totalLatestCompletedOffset);
        result.put(topic + "/" + "totalRecordsInPartitions", topicMetrics.totalRecordsInPartitions);
    }
    LOG.debug("Metrics Tick: value : {}", result);
    return result;
}
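Because this metric runs on a periodic tick, a retriable failure from beginningOffsets/endOffsets is not worth an inline retry loop: the method simply returns null and the next tick fetches fresh offsets. A minimal sketch of the same approach for an arbitrary periodic lag check, assuming the consumer and partition set are provided by the caller:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RetriableException;

// Sketch: compute per-partition record counts, skipping this tick on a retriable failure.
static Map<TopicPartition, Long> recordsPerPartition(Consumer<?, ?> consumer, Set<TopicPartition> partitions) {
    Map<TopicPartition, Long> beginning;
    Map<TopicPartition, Long> end;
    try {
        beginning = consumer.beginningOffsets(partitions);
        end = consumer.endOffsets(partitions);
    } catch (RetriableException e) {
        // Transient broker issue: report nothing now, the next tick will try again.
        return null;
    }
    Map<TopicPartition, Long> counts = new HashMap<>();
    for (TopicPartition tp : partitions) {
        counts.put(tp, end.get(tp) - beginning.get(tp));
    }
    return counts;
}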