Example 1 with RetriableException

Use of org.apache.kafka.common.errors.RetriableException in project kafka by apache.

From the class ConsumerCoordinator, method doAutoCommitOffsetsAsync:

private void doAutoCommitOffsetsAsync() {
    Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
    log.debug("Sending asynchronous auto-commit of offsets {} for group {}", allConsumedOffsets, groupId);
    commitOffsetsAsync(allConsumedOffsets, new OffsetCommitCallback() {

        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            if (exception != null) {
                log.warn("Auto-commit of offsets {} failed for group {}: {}", offsets, groupId, exception.getMessage());
                if (exception instanceof RetriableException)
                    nextAutoCommitDeadline = Math.min(time.milliseconds() + retryBackoffMs, nextAutoCommitDeadline);
            } else {
                log.debug("Completed auto-commit of offsets {} for group {}", offsets, groupId);
            }
        }
    });
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback), GroupAuthorizationException (org.apache.kafka.common.errors.GroupAuthorizationException), RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException), KafkaException (org.apache.kafka.common.KafkaException), RetriableException (org.apache.kafka.common.errors.RetriableException), InterruptException (org.apache.kafka.common.errors.InterruptException), WakeupException (org.apache.kafka.common.errors.WakeupException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException)
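
For comparison, here is a minimal, self-contained sketch of the same retriable-vs-fatal branching against the public consumer API (assuming kafka-clients 2.x; the broker address, group id, and topic name below are placeholders):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.RetriableException;

public class AsyncCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "example-group");           // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            consumer.poll(Duration.ofMillis(100));
            consumer.commitAsync((offsets, exception) -> {
                if (exception == null) {
                    System.out.println("Committed " + offsets);
                } else if (exception instanceof RetriableException) {
                    // Transient failure (e.g. the coordinator is briefly unavailable):
                    // log and let a later commit retry, as the coordinator code does.
                    System.err.println("Retriable commit failure: " + exception.getMessage());
                } else {
                    // Non-retriable failure: retrying will not help, so surface it.
                    System.err.println("Fatal commit failure: " + exception);
                }
            });
            consumer.poll(Duration.ofMillis(100)); // async commit callbacks run during poll
        }
    }
}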

Example 2 with RetriableException

Use of org.apache.kafka.common.errors.RetriableException in project ksql by confluentinc.

From the class KafkaTopicClientImpl, method executeWithRetries:

private static <T> T executeWithRetries(final Supplier<KafkaFuture<T>> supplier) throws Exception {
    int retries = 0;
    Exception lastException = null;
    while (retries < NUM_RETRIES) {
        try {
            if (retries != 0) {
                Thread.sleep(RETRY_BACKOFF_MS);
            }
            return supplier.get().get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof RetriableException) {
                retries++;
                log.info("Retrying admin request due to retriable exception. Retry no: " + retries, e);
                lastException = e;
            } else if (e.getCause() instanceof Exception) {
                throw (Exception) e.getCause();
            } else {
                throw e;
            }
        }
    }
    throw lastException;
}
Also used: ExecutionException (java.util.concurrent.ExecutionException), KafkaTopicException (io.confluent.ksql.exception.KafkaTopicException), RetriableException (org.apache.kafka.common.errors.RetriableException), KafkaResponseGetFailedException (io.confluent.ksql.exception.KafkaResponseGetFailedException), TopicExistsException (org.apache.kafka.common.errors.TopicExistsException)
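
A hedged usage sketch of the same retry pattern against the public AdminClient API. Everything here is illustrative: NUM_RETRIES, RETRY_BACKOFF_MS, and the broker address are placeholders rather than ksql's actual configuration, and the helper is a compact restatement of the method above:

import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.function.Supplier;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.errors.RetriableException;

public class AdminRetryExample {

    // Illustrative values, not ksql's actual constants.
    private static final int NUM_RETRIES = 5;
    private static final long RETRY_BACKOFF_MS = 500;

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (AdminClient admin = AdminClient.create(props)) {
            // listTopics().names() returns a KafkaFuture<Set<String>>, so it can be
            // handed to the retry helper as a Supplier.
            Set<String> topics = executeWithRetries(() -> admin.listTopics().names());
            System.out.println("Topics: " + topics);
        }
    }

    // Compact restatement of the helper above: KafkaFuture.get() wraps
    // broker-side errors in ExecutionException, hence the getCause() checks.
    private static <T> T executeWithRetries(Supplier<KafkaFuture<T>> supplier) throws Exception {
        Exception lastException = null;
        for (int attempt = 0; attempt < NUM_RETRIES; attempt++) {
            if (attempt > 0) {
                Thread.sleep(RETRY_BACKOFF_MS); // back off before each retry
            }
            try {
                return supplier.get().get();
            } catch (ExecutionException e) {
                if (e.getCause() instanceof RetriableException) {
                    lastException = e; // transient: loop around and retry
                } else if (e.getCause() instanceof Exception) {
                    throw (Exception) e.getCause(); // unwrap non-retriable causes
                } else {
                    throw e;
                }
            }
        }
        throw lastException;
    }
}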

Example 3 with RetriableException

Use of org.apache.kafka.common.errors.RetriableException in project apache-kafka-on-k8s by banzaicloud.

From the class WorkerSourceTask, method sendRecords:

/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
    // Synchronize with the offset committer: track the in-flight message
    // (or backlog it while a flush is in progress) and update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), KafkaException (org.apache.kafka.common.KafkaException), TimeoutException (java.util.concurrent.TimeoutException), RetriableException (org.apache.kafka.common.errors.RetriableException), ExecutionException (java.util.concurrent.ExecutionException), ConnectException (org.apache.kafka.connect.errors.ConnectException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders), Callback (org.apache.kafka.clients.producer.Callback), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
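
Reduced to its essentials, the send-side pattern looks like this against the public producer API (a hedged sketch; broker address and topic are placeholders). The point is that send() can fail in two ways: synchronously, where a RetriableException means back off and resend, and asynchronously, via the callback:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RetriableException;

public class ProducerSendExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            ProducerRecord<byte[], byte[]> record =
                    new ProducerRecord<>("example-topic", "hello".getBytes());
            try {
                // Asynchronous failures arrive in the callback, as in the
                // WorkerSourceTask example above.
                producer.send(record, (metadata, e) -> {
                    if (e != null) {
                        System.err.println("Send failed asynchronously: " + e);
                    } else {
                        System.out.printf("Wrote to %s-%d@%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                });
            } catch (RetriableException e) {
                // send() can also fail synchronously (e.g. a retriable timeout while
                // fetching metadata): back off and resend later.
                System.err.println("Retriable send failure, backing off: " + e);
            } catch (KafkaException e) {
                // Anything else is unrecoverable, mirroring the ConnectException above.
                throw new RuntimeException("Unrecoverable send failure", e);
            }
        }
    }
}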

Example 4 with RetriableException

Use of org.apache.kafka.common.errors.RetriableException in project apache-kafka-on-k8s by banzaicloud.

From the class Fetcher, method resetOffsetsAsync:

private void resetOffsetsAsync(Map<TopicPartition, Long> partitionResetTimestamps) {
    // Add the topics to the metadata to do a single metadata fetch.
    for (TopicPartition tp : partitionResetTimestamps.keySet()) metadata.add(tp.topic());
    Map<Node, Map<TopicPartition, Long>> timestampsToSearchByNode = groupListOffsetRequests(partitionResetTimestamps);
    for (Map.Entry<Node, Map<TopicPartition, Long>> entry : timestampsToSearchByNode.entrySet()) {
        Node node = entry.getKey();
        final Map<TopicPartition, Long> resetTimestamps = entry.getValue();
        subscriptions.setResetPending(resetTimestamps.keySet(), time.milliseconds() + requestTimeoutMs);
        RequestFuture<ListOffsetResult> future = sendListOffsetRequest(node, resetTimestamps, false);
        future.addListener(new RequestFutureListener<ListOffsetResult>() {

            @Override
            public void onSuccess(ListOffsetResult result) {
                if (!result.partitionsToRetry.isEmpty()) {
                    subscriptions.resetFailed(result.partitionsToRetry, time.milliseconds() + retryBackoffMs);
                    metadata.requestUpdate();
                }
                for (Map.Entry<TopicPartition, OffsetData> fetchedOffset : result.fetchedOffsets.entrySet()) {
                    TopicPartition partition = fetchedOffset.getKey();
                    OffsetData offsetData = fetchedOffset.getValue();
                    Long requestedResetTimestamp = resetTimestamps.get(partition);
                    resetOffsetIfNeeded(partition, requestedResetTimestamp, offsetData);
                }
            }

            @Override
            public void onFailure(RuntimeException e) {
                subscriptions.resetFailed(resetTimestamps.keySet(), time.milliseconds() + retryBackoffMs);
                metadata.requestUpdate();
                if (!(e instanceof RetriableException) && !cachedListOffsetsException.compareAndSet(null, e))
                    log.error("Discarding error in ListOffsetResponse because another error is pending", e);
            }
        });
    }
}
Also used: Node (org.apache.kafka.common.Node), TopicPartition (org.apache.kafka.common.TopicPartition), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), RetriableException (org.apache.kafka.common.errors.RetriableException)
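
resetOffsetsAsync is consumer-internal, but the same ListOffsets lookup is reachable through the public API. A hedged sketch (placeholder broker, group, topic, and partition) that resolves an offset by timestamp and treats a retriable failure as "try again later":

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RetriableException;

public class OffsetLookupExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("group.id", "example-group");           // placeholder
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("example-topic", 0);
            consumer.assign(Collections.singleton(tp));
            try {
                // Resolve the first offset at or after a timestamp (one hour ago)
                // and seek to it; this issues the same ListOffsets request that
                // resetOffsetsAsync sends internally.
                long oneHourAgo = System.currentTimeMillis() - 3_600_000L;
                Map<TopicPartition, OffsetAndTimestamp> found =
                        consumer.offsetsForTimes(Collections.singletonMap(tp, oneHourAgo));
                OffsetAndTimestamp ot = found.get(tp);
                if (ot != null) {
                    consumer.seek(tp, ot.offset());
                }
            } catch (RetriableException e) {
                // Transient lookup failure: leave the position unset and retry later.
                System.err.println("Offset lookup failed, will retry: " + e);
            }
        }
    }
}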

Example 5 with RetriableException

Use of org.apache.kafka.common.errors.RetriableException in project storm by apache.

From the class KafkaOffsetMetric, method getValueAndReset:

@Override
public Object getValueAndReset() {
    Map<TopicPartition, OffsetManager> offsetManagers = offsetManagerSupplier.get();
    Consumer<K, V> consumer = consumerSupplier.get();
    if (offsetManagers == null || offsetManagers.isEmpty() || consumer == null) {
        LOG.debug("Metrics Tick: offsetManagers or kafkaConsumer is null.");
        return null;
    }
    Map<String, TopicMetrics> topicMetricsMap = new HashMap<>();
    Set<TopicPartition> topicPartitions = offsetManagers.keySet();
    Map<TopicPartition, Long> beginningOffsets;
    Map<TopicPartition, Long> endOffsets;
    try {
        beginningOffsets = consumer.beginningOffsets(topicPartitions);
        endOffsets = consumer.endOffsets(topicPartitions);
    } catch (RetriableException e) {
        LOG.warn("Failed to get offsets from Kafka! Will retry on next metrics tick.", e);
        return null;
    }
    // map to hold partition level and topic level metrics
    Map<String, Long> result = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetManager> entry : offsetManagers.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        OffsetManager offsetManager = entry.getValue();
        long latestTimeOffset = endOffsets.get(topicPartition);
        long earliestTimeOffset = beginningOffsets.get(topicPartition);
        long latestEmittedOffset = offsetManager.getLatestEmittedOffset();
        long latestCompletedOffset = offsetManager.getCommittedOffset();
        long spoutLag = latestTimeOffset - latestCompletedOffset;
        long recordsInPartition = latestTimeOffset - earliestTimeOffset;
        String metricPath = topicPartition.topic() + "/partition_" + topicPartition.partition();
        result.put(metricPath + "/" + "spoutLag", spoutLag);
        result.put(metricPath + "/" + "earliestTimeOffset", earliestTimeOffset);
        result.put(metricPath + "/" + "latestTimeOffset", latestTimeOffset);
        result.put(metricPath + "/" + "latestEmittedOffset", latestEmittedOffset);
        result.put(metricPath + "/" + "latestCompletedOffset", latestCompletedOffset);
        result.put(metricPath + "/" + "recordsInPartition", recordsInPartition);
        TopicMetrics topicMetrics = topicMetricsMap.get(topicPartition.topic());
        if (topicMetrics == null) {
            topicMetrics = new TopicMetrics();
            topicMetricsMap.put(topicPartition.topic(), topicMetrics);
        }
        topicMetrics.totalSpoutLag += spoutLag;
        topicMetrics.totalEarliestTimeOffset += earliestTimeOffset;
        topicMetrics.totalLatestTimeOffset += latestTimeOffset;
        topicMetrics.totalLatestEmittedOffset += latestEmittedOffset;
        topicMetrics.totalLatestCompletedOffset += latestCompletedOffset;
        topicMetrics.totalRecordsInPartitions += recordsInPartition;
    }
    for (Map.Entry<String, TopicMetrics> e : topicMetricsMap.entrySet()) {
        String topic = e.getKey();
        TopicMetrics topicMetrics = e.getValue();
        result.put(topic + "/" + "totalSpoutLag", topicMetrics.totalSpoutLag);
        result.put(topic + "/" + "totalEarliestTimeOffset", topicMetrics.totalEarliestTimeOffset);
        result.put(topic + "/" + "totalLatestTimeOffset", topicMetrics.totalLatestTimeOffset);
        result.put(topic + "/" + "totalLatestEmittedOffset", topicMetrics.totalLatestEmittedOffset);
        result.put(topic + "/" + "totalLatestCompletedOffset", topicMetrics.totalLatestCompletedOffset);
        result.put(topic + "/" + "totalRecordsInPartitions", topicMetrics.totalRecordsInPartitions);
    }
    LOG.debug("Metrics Tick: value : {}", result);
    return result;
}
Also used: HashMap (java.util.HashMap), OffsetManager (org.apache.storm.kafka.spout.internal.OffsetManager), TopicPartition (org.apache.kafka.common.TopicPartition), Map (java.util.Map), RetriableException (org.apache.kafka.common.errors.RetriableException)
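
The lag arithmetic itself does not depend on anything Storm-internal. A hedged, self-contained sketch of the same calculation (lag = latest offset minus last committed offset) using only the public consumer API, with placeholder connection details:

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RetriableException;

public class ConsumerLagExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("group.id", "example-group");           // placeholder
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("example-topic", 0);
            Set<TopicPartition> tps = Collections.singleton(tp);
            try {
                Map<TopicPartition, Long> beginning = consumer.beginningOffsets(tps);
                Map<TopicPartition, Long> end = consumer.endOffsets(tps);
                OffsetAndMetadata committed = consumer.committed(tp);
                long committedOffset = committed != null ? committed.offset() : beginning.get(tp);
                // Same arithmetic as the metric: lag is how far the group's last
                // committed offset trails the latest offset in the partition.
                long lag = end.get(tp) - committedOffset;
                long recordsInPartition = end.get(tp) - beginning.get(tp);
                System.out.printf("records=%d lag=%d%n", recordsInPartition, lag);
            } catch (RetriableException e) {
                // As in getValueAndReset: skip this tick and try again on the next one.
                System.err.println("Failed to get offsets from Kafka, will retry: " + e);
            }
        }
    }
}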

Aggregations

RetriableException (org.apache.kafka.common.errors.RetriableException): 18 usages
KafkaException (org.apache.kafka.common.KafkaException): 9 usages
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 8 usages
HashMap (java.util.HashMap): 7 usages
Map (java.util.Map): 7 usages
LinkedHashMap (java.util.LinkedHashMap): 6 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 6 usages
ArrayList (java.util.ArrayList): 5 usages
ClientResponse (org.apache.kafka.clients.ClientResponse): 5 usages
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 5 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 5 usages
Errors (org.apache.kafka.common.protocol.Errors): 5 usages
List (java.util.List): 4 usages
OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback): 4 usages
RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException): 4 usages
Cluster (org.apache.kafka.common.Cluster): 4 usages
Node (org.apache.kafka.common.Node): 4 usages
GroupAuthorizationException (org.apache.kafka.common.errors.GroupAuthorizationException): 4 usages
InterruptException (org.apache.kafka.common.errors.InterruptException): 4 usages
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 4 usages