Use of org.apache.kafka.common.errors.RetriableException in project kafka by apache.
The class Fetcher, method validateOffsetsAsync.
/**
* For each partition which needs validation, make an asynchronous request to get the end-offsets for the partition
* with the epoch less than or equal to the epoch the partition last saw.
*
* Requests are grouped by Node for efficiency.
*/
private void validateOffsetsAsync(Map<TopicPartition, FetchPosition> partitionsToValidate) {
final Map<Node, Map<TopicPartition, FetchPosition>> regrouped = regroupFetchPositionsByLeader(partitionsToValidate);
long nextResetTimeMs = time.milliseconds() + requestTimeoutMs;
regrouped.forEach((node, fetchPositions) -> {
if (node.isEmpty()) {
metadata.requestUpdate();
return;
}
NodeApiVersions nodeApiVersions = apiVersions.get(node.idString());
if (nodeApiVersions == null) {
client.tryConnect(node);
return;
}
if (!hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) {
log.debug("Skipping validation of fetch offsets for partitions {} since the broker does not " + "support the required protocol version (introduced in Kafka 2.3)", fetchPositions.keySet());
for (TopicPartition partition : fetchPositions.keySet()) {
subscriptions.completeValidation(partition);
}
return;
}
subscriptions.setNextAllowedRetry(fetchPositions.keySet(), nextResetTimeMs);
RequestFuture<OffsetForEpochResult> future = offsetsForLeaderEpochClient.sendAsyncRequest(node, fetchPositions);
future.addListener(new RequestFutureListener<OffsetForEpochResult>() {
@Override
public void onSuccess(OffsetForEpochResult offsetsResult) {
List<SubscriptionState.LogTruncation> truncations = new ArrayList<>();
if (!offsetsResult.partitionsToRetry().isEmpty()) {
subscriptions.setNextAllowedRetry(offsetsResult.partitionsToRetry(), time.milliseconds() + retryBackoffMs);
metadata.requestUpdate();
}
// For each OffsetsForLeader response, check if the end-offset is lower than our current offset
// for the partition. If so, it means we have experienced log truncation and need to reposition
// that partition's offset.
//
// In addition, check whether the returned offset and epoch are valid. If not, then we should reset
// its offset if reset policy is configured, or throw out of range exception.
offsetsResult.endOffsets().forEach((topicPartition, respEndOffset) -> {
FetchPosition requestPosition = fetchPositions.get(topicPartition);
Optional<SubscriptionState.LogTruncation> truncationOpt = subscriptions.maybeCompleteValidation(topicPartition, requestPosition, respEndOffset);
truncationOpt.ifPresent(truncations::add);
});
if (!truncations.isEmpty()) {
maybeSetOffsetForLeaderException(buildLogTruncationException(truncations));
}
}
@Override
public void onFailure(RuntimeException e) {
subscriptions.requestFailed(fetchPositions.keySet(), time.milliseconds() + retryBackoffMs);
metadata.requestUpdate();
if (!(e instanceof RetriableException)) {
maybeSetOffsetForLeaderException(e);
}
}
});
});
}
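The detail worth noting in onFailure above is the instanceof check: a RetriableException only reschedules the validation (retry backoff plus a metadata update), while any other failure is surfaced to the caller. Below is a minimal, self-contained sketch of that classification using only the public Kafka error hierarchy; the class and method names are illustrative, not Kafka source.

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.TimeoutException;

// Minimal sketch (not Kafka source): classify a request failure the same way
// the listener above does in onFailure.
public class RetriableCheck {

    // A retriable failure is transient: schedule a retry and stay silent.
    // Anything else must be propagated to the application.
    static boolean shouldPropagate(RuntimeException e) {
        return !(e instanceof RetriableException);
    }

    public static void main(String[] args) {
        // TimeoutException extends RetriableException, so it is swallowed and retried.
        System.out.println(shouldPropagate(new TimeoutException("request timed out"))); // false
        // An IllegalStateException is not retriable and reaches the caller.
        System.out.println(shouldPropagate(new IllegalStateException("bug"))); // true
    }
}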
Use of org.apache.kafka.common.errors.RetriableException in project kafka by apache.
The class ConsumerCoordinator, method doCommitOffsetsAsync.
private RequestFuture<Void> doCommitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
final OffsetCommitCallback cb = callback == null ? defaultOffsetCommitCallback : callback;
future.addListener(new RequestFutureListener<Void>() {
@Override
public void onSuccess(Void value) {
if (interceptors != null)
interceptors.onCommit(offsets);
completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, null));
}
@Override
public void onFailure(RuntimeException e) {
Exception commitException = e;
if (e instanceof RetriableException) {
commitException = new RetriableCommitFailedException(e);
}
completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, commitException));
if (commitException instanceof FencedInstanceIdException) {
asyncCommitFenced.set(true);
}
}
});
return future;
}
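On the application side, this wrapping is what a commitAsync callback observes: transient broker errors arrive as RetriableCommitFailedException, while fatal ones (such as FencedInstanceIdException) arrive unwrapped. A hedged sketch of a caller reacting to that distinction follows; the class name and the escalation choice are illustrative, not part of the source above.

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.consumer.RetriableCommitFailedException;
import org.apache.kafka.common.TopicPartition;

// Hypothetical caller-side callback: separates the transient, wrapped failure
// produced above from a fatal commit error.
public class CommitCallbackExample {

    static void commitWithCallback(KafkaConsumer<String, String> consumer) {
        consumer.commitAsync(new OffsetCommitCallback() {
            @Override
            public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                if (exception == null) {
                    // commit succeeded; nothing to do
                } else if (exception instanceof RetriableCommitFailedException) {
                    // transient failure: a later commitAsync/commitSync covers these offsets
                } else {
                    // fatal (e.g. after a rebalance or a fenced group instance): escalate
                    throw new IllegalStateException("offset commit failed", exception);
                }
            }
        });
    }
}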
Use of org.apache.kafka.common.errors.RetriableException in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerClient, method runPollLoop.
/**
* Runs the loop polling for Kafka messages until an event is received in the event queue.
* @param pollTimeout the timeout in milliseconds to wait for new Kafka messages when fewer than the maximum batch size are available.
* @param throttleSleepMillis the time in milliseconds the polling thread sleeps after each poll.
*
* @throws InterruptedException
*/
protected void runPollLoop(long pollTimeout, long throttleSleepMillis) throws InterruptedException {
if (throttleSleepMillis > 0L) {
logger.log(DEBUG_LEVEL, MsgFormatter.format("Initiating throttled polling (sleep time = {0} ms); maxPollRecords = {1}", throttleSleepMillis, getMaxPollRecords()));
} else {
logger.log(DEBUG_LEVEL, MsgFormatter.format("Initiating polling; maxPollRecords = {0}", getMaxPollRecords()));
}
synchronized (drainBuffer) {
if (!drainBuffer.isEmpty()) {
final int bufSz = drainBuffer.size();
final int capacity = messageQueue.remainingCapacity();
// restore records that have been put aside to the drain buffer
if (capacity < bufSz) {
String msg = MsgFormatter.format("drain buffer size {0} > capacity of message queue {1}", bufSz, capacity);
logger.error("runPollLoop() - " + msg);
// must restart operator.
throw new RuntimeException(msg);
}
messageQueue.addAll(drainBuffer);
final int qSize = messageQueue.size();
drainBuffer.clear();
logger.log(DEBUG_LEVEL, MsgFormatter.format("runPollLoop(): {0,number,#} consumer records added from drain buffer to the message queue. Message queue size is {1,number,#} now.", bufSz, qSize));
}
}
// continue polling for messages until a new event
// arrives in the event queue
fetchPaused = consumer.paused().size() > 0;
logger.log(DEBUG_LEVEL, "previously paused partitions: " + consumer.paused());
while (eventQueue.isEmpty()) {
boolean doPoll = true;
// can wait for 100 ms; throws InterruptedException:
try {
checkSpaceInMessageQueueAndPauseFetching(false);
} catch (IllegalStateException e) {
logger.warn("runPollLoop(): " + e.getLocalizedMessage());
// no space, could not pause - do not call poll
doPoll = false;
}
if (doPoll) {
try {
final long now = System.currentTimeMillis();
final long timeBetweenPolls = now - lastPollTimestamp;
if (lastPollTimestamp > 0) {
// this is not the first 'poll'
if (timeBetweenPolls >= maxPollIntervalMs) {
    logger.warn("Kafka client didn't poll often enough for messages. "
            + "Maximum time between two polls is currently " + maxPollIntervalMs
            + " milliseconds. Consider setting the consumer property '"
            + ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG
            + "' to a value higher than " + timeBetweenPolls);  //$NON-NLS-1$
}
}
lastPollTimestamp = System.currentTimeMillis();
EnqueResult r = pollAndEnqueue(pollTimeout, throttleSleepMillis > 0L);
final int nMessages = r.getNumRecords();
if (nMessages > 0) {
pollExcFilter.reset();
}
final long nQueuedBytes = r.getSumTotalSize();
final Level l = Level.DEBUG;
// final Level l = DEBUG_LEVEL;
if (logger.isEnabledFor(l) && nMessages > 0) {
logger.log(l, MsgFormatter.format("{0,number,#} records with total {1,number,#}/{2,number,#}/{3,number,#} bytes (key/value/sum) fetched and enqueued", nMessages, r.getSumKeySize(), r.getSumValueSize(), nQueuedBytes));
}
tryAdjustMinFreeMemory(nQueuedBytes, nMessages);
nPendingMessages.setValue(messageQueue.size());
if (throttleSleepMillis > 0L) {
synchronized (throttledPollWaitMonitor) {
throttledPollWaitMonitor.wait(throttleSleepMillis);
}
}
} catch (RetriableException e) {
logger.warn("Retriable exception (ignored, may succeed if retried): " + e, e);
logger.info("Going to sleep for 100 ms before next poll ...");
Thread.sleep(100l);
} catch (SerializationException e) {
    // not recoverable by polling again; see https://issues.apache.org/jira/browse/KAFKA-4740
    throw e;
} catch (Exception e) {
if (pollExcFilter.filter(e)) {
logger.warn(e);
} else {
logger.error(e);
throw new KafkaOperatorRuntimeException("Consecutive number of exceptions too high.", e);
}
}
}
}
logger.debug("Stop polling. Message in event queue: " + eventQueue.peek().getEventType());  //$NON-NLS-1$
}
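Stripped of the operator-specific drain buffer, throttling, and metrics, the error handling of this loop reduces to two special cases: back off briefly on RetriableException, and rethrow SerializationException because retrying would hit the same record again (KAFKA-4740). A minimal sketch of that skeleton against the public consumer API follows; the class name, stop flag, and sleep interval mirror the code above but are otherwise illustrative.

import java.time.Duration;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.SerializationException;

// Minimal sketch of the poll-loop error handling above; pollUntilStopped and
// the stop flag are illustrative, not part of the toolkit.
public class PollLoopSketch {

    static <K, V> void pollUntilStopped(KafkaConsumer<K, V> consumer, AtomicBoolean stopped) throws InterruptedException {
        while (!stopped.get()) {
            try {
                ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(100));
                // ... hand records over to the processing queue ...
            } catch (RetriableException e) {
                // transient broker-side problem; back off briefly and poll again
                Thread.sleep(100L);
            } catch (SerializationException e) {
                // not recoverable by polling again (KAFKA-4740); let it escalate
                throw e;
            }
        }
    }
}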