use of org.apache.kafka.clients.consumer.NoOffsetForPartitionException in project apex-malhar by apache.
the class KafkaConsumerWrapper method emitImmediately.
public void emitImmediately(Map<AbstractKafkaPartitioner.PartitionMeta, Pair<Long, Long>> windowData) {
  for (Map.Entry<AbstractKafkaPartitioner.PartitionMeta, Pair<Long, Long>> windowEntry : windowData.entrySet()) {
    AbstractKafkaPartitioner.PartitionMeta meta = windowEntry.getKey();
    Pair<Long, Long> replayOffsetSize = windowEntry.getValue();
    AbstractKafkaConsumer kc = consumers.get(meta.getCluster());
    if (kc == null || !kc.isConsumerContainsPartition(windowEntry.getKey().getTopicPartition())) {
      throw new RuntimeException("Couldn't find consumer to replay the message PartitionMeta : " + meta);
    }
    // pause every partition other than the one being replayed
    for (TopicPartition tp : kc.getPartitions()) {
      if (meta.getTopicPartition().equals(tp)) {
        kc.resumePartition(tp);
      } else {
        try {
          kc.positionPartition(tp);
        } catch (NoOffsetForPartitionException e) {
          // poll() throws this exception if any of the subscribed partitions
          // has no initialized position, so handle it before pausing
          handleNoOffsetForPartitionException(e, kc);
        }
        kc.pausePartition(tp);
      }
    }
    // set the offset to the window start offset
    kc.seekToOffset(meta.getTopicPartition(), replayOffsetSize.getLeft());
    long windowCount = replayOffsetSize.getRight();
    while (windowCount > 0) {
      try {
        ConsumerRecords<byte[], byte[]> records = kc.pollRecords(ownerOperator.getConsumerTimeout());
        for (Iterator<ConsumerRecord<byte[], byte[]>> cri = records.iterator(); cri.hasNext() && windowCount > 0; ) {
          ownerOperator.emitTuple(meta.getCluster(), cri.next());
          windowCount--;
        }
      } catch (NoOffsetForPartitionException e) {
        throw new RuntimeException("Couldn't replay the offset", e);
      }
    }
    // set the offset to just after the replayed window
    kc.seekToOffset(meta.getTopicPartition(), replayOffsetSize.getLeft() + replayOffsetSize.getRight());
  }
  // resume all partitions on all consumers
  for (AbstractKafkaConsumer kc : consumers.values()) {
    kc.resumeAllPartitions();
  }
}
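The handleNoOffsetForPartitionException helper called above belongs to the wrapper and is not shown here. The general pattern it stands for is to catch the exception that poll() raises, assign an explicit position to the partitions it reports, and poll again. The following is a minimal standalone sketch of that pattern against the plain consumer API; the class name, topic, bootstrap address, and the choice to seek to the beginning are illustrative assumptions, not part of the project above.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;

public class SeekOnMissingOffsetExample {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "replay-example");
        // with "none" there is no automatic reset, so a partition without a
        // committed offset makes poll() throw NoOffsetForPartitionException
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("replay-topic"));
            ConsumerRecords<byte[], byte[]> records;
            try {
                records = consumer.poll(Duration.ofSeconds(1));
            } catch (NoOffsetForPartitionException e) {
                // the exception carries every partition that has no position yet;
                // pick an explicit starting point for them and poll again
                consumer.seekToBeginning(e.partitions());
                records = consumer.poll(Duration.ofSeconds(1));
            }
            System.out.println("fetched " + records.count() + " records");
        }
    }
}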
use of org.apache.kafka.clients.consumer.NoOffsetForPartitionException in project nakadi by zalando.
the class NakadiKafkaConsumerTest method whenReadEventsThenNakadiException.
@Test
@SuppressWarnings("unchecked")
public void whenReadEventsThenNakadiException() {
    // ARRANGE //
    final ImmutableList<RuntimeException> exceptions = ImmutableList.of(new NoOffsetForPartitionException(new TopicPartition("", 0)), new KafkaException());
    int numberOfNakadiExceptions = 0;
    for (final Exception exception : exceptions) {
        final KafkaConsumer<byte[], byte[]> kafkaConsumerMock = mock(KafkaConsumer.class);
        when(kafkaConsumerMock.poll(POLL_TIMEOUT)).thenThrow(exception);
        try {
            // ACT //
            final NakadiKafkaConsumer consumer = new NakadiKafkaConsumer(kafkaConsumerMock, ImmutableList.of(), createTpTimelineMap(), POLL_TIMEOUT);
            consumer.readEvents();
            // ASSERT //
            fail("An exception was expected to be thrown");
        } catch (final Exception e) {
            numberOfNakadiExceptions++;
        }
    }
    assertThat("We should get a NakadiException for every call", numberOfNakadiExceptions, equalTo(exceptions.size()));
}
use of org.apache.kafka.clients.consumer.NoOffsetForPartitionException in project kafka by apache.
the class SubscriptionState method resetInitializingPositions.
public synchronized void resetInitializingPositions() {
    final Set<TopicPartition> partitionsWithNoOffsets = new HashSet<>();
    assignment.forEach((tp, partitionState) -> {
        if (partitionState.fetchState.equals(FetchStates.INITIALIZING)) {
            if (defaultResetStrategy == OffsetResetStrategy.NONE)
                partitionsWithNoOffsets.add(tp);
            else
                requestOffsetReset(tp);
        }
    });
    if (!partitionsWithNoOffsets.isEmpty())
        throw new NoOffsetForPartitionException(partitionsWithNoOffsets);
}
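defaultResetStrategy is NONE when the consumer is configured with auto.offset.reset=none, which is the usual choice when offsets are kept in an external store rather than committed to Kafka. A minimal sketch of the caller side under that assumption follows; the ExternalOffsetStoreExample class and its storedOffsets map are hypothetical stand-ins for whatever store the application actually uses.

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
import org.apache.kafka.common.TopicPartition;

public class ExternalOffsetStoreExample {

    // stand-in for an external offset store, e.g. a database table keyed by partition
    private final Map<TopicPartition, Long> storedOffsets = new HashMap<>();

    public void pollWithExternalOffsets(Consumer<byte[], byte[]> consumer) {
        try {
            consumer.poll(Duration.ofSeconds(1));
        } catch (NoOffsetForPartitionException e) {
            // each partition reported here has neither a committed offset nor a
            // reset strategy; restore its position from the store, or start at 0
            for (TopicPartition tp : e.partitions()) {
                long offset = storedOffsets.getOrDefault(tp, 0L);
                consumer.seek(tp, offset);
            }
        }
    }
}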
use of org.apache.kafka.clients.consumer.NoOffsetForPartitionException in project kafka by apache.
the class Fetcher method resetOffset.
/**
 * Reset offsets for the given partition using the offset reset strategy.
 *
 * @param partition The partition whose offset needs to be reset
 * @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined
 */
private void resetOffset(TopicPartition partition) {
    OffsetResetStrategy strategy = subscriptions.resetStrategy(partition);
    log.debug("Resetting offset for partition {} to {} offset.", partition, strategy.name().toLowerCase(Locale.ROOT));
    final long timestamp;
    if (strategy == OffsetResetStrategy.EARLIEST)
        timestamp = ListOffsetRequest.EARLIEST_TIMESTAMP;
    else if (strategy == OffsetResetStrategy.LATEST)
        timestamp = ListOffsetRequest.LATEST_TIMESTAMP;
    else
        throw new NoOffsetForPartitionException(partition);
    Map<TopicPartition, OffsetData> offsetsByTimes = retrieveOffsetsByTimes(Collections.singletonMap(partition, timestamp), Long.MAX_VALUE, false);
    OffsetData offsetData = offsetsByTimes.get(partition);
    if (offsetData == null)
        throw new NoOffsetForPartitionException(partition);
    long offset = offsetData.offset;
    // we might lose the assignment while fetching the offset, so check that it is still active
    if (subscriptions.isAssigned(partition))
        this.subscriptions.seek(partition, offset);
}
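The EARLIEST and LATEST strategies above resolve to the same sentinel timestamps that back the public beginningOffsets and endOffsets calls. When the strategy is none, an application can resolve a position itself, for example by timestamp, instead of letting this path throw. A minimal sketch using only the public consumer API; the class name, the chosen timestamp, and the fall-back to the end offset are illustrative assumptions.

import java.time.Instant;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class ManualOffsetResetExample {

    // seek the partition to the first offset at or after the given instant,
    // falling back to the end of the partition if no such record exists
    public static void seekToTimestamp(Consumer<byte[], byte[]> consumer, TopicPartition tp, Instant ts) {
        Map<TopicPartition, OffsetAndTimestamp> byTime =
            consumer.offsetsForTimes(Collections.singletonMap(tp, ts.toEpochMilli()));
        OffsetAndTimestamp found = byTime.get(tp);
        if (found != null) {
            consumer.seek(tp, found.offset());
        } else {
            Map<TopicPartition, Long> end = consumer.endOffsets(Collections.singleton(tp));
            consumer.seek(tp, end.get(tp));
        }
    }
}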
use of org.apache.kafka.clients.consumer.NoOffsetForPartitionException in project apache-kafka-on-k8s by banzaicloud.
the class SubscriptionState method resetMissingPositions.
public void resetMissingPositions() {
    final Set<TopicPartition> partitionsWithNoOffsets = new HashSet<>();
    for (PartitionStates.PartitionState<TopicPartitionState> state : assignment.partitionStates()) {
        TopicPartition tp = state.topicPartition();
        TopicPartitionState partitionState = state.value();
        if (partitionState.isMissingPosition()) {
            if (defaultResetStrategy == OffsetResetStrategy.NONE)
                partitionsWithNoOffsets.add(tp);
            else
                partitionState.reset(defaultResetStrategy);
        }
    }
    if (!partitionsWithNoOffsets.isEmpty())
        throw new NoOffsetForPartitionException(partitionsWithNoOffsets);
}